query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
metadata: dict
negatives: list (length 30)
negative_scores: list (length 30)
document_score: string (lengths 4 to 10)
document_rank: string (2 classes)
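Each row pairs a natural-language query with a positive code document and 30 mined negative documents plus their retrieval scores. Below is a minimal sketch of consuming one row for triplet-style contrastive training, assuming the Hugging Face datasets library; the dataset path used here is a placeholder, not the real identifier.

from datasets import load_dataset

# "org/code-retrieval-triplets" is a hypothetical path standing in for this dataset.
ds = load_dataset("org/code-retrieval-triplets", split="train")

row = ds[0]
anchor = row["query"]        # natural-language description
positive = row["document"]   # the matching code snippet
negatives = row["negatives"] # 30 non-matching code snippets
negative_scores = [float(s) for s in row["negative_scores"]]  # scores of the negatives
positive_score = float(row["document_score"])                 # score of the positive

# metadata marks which columns form the (anchor, positive, negatives) objective.
assert row["metadata"]["objective"]["triplet"] == [["query", "document", "negatives"]]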
Builds a histogram of values given an iterable of mappings and a key. For each mapping "m" with key "key" in iterator, the value m[key] is considered. Returns a list of tuples (hash, count, proportion, value), where "hash" is a sha1sum hash of the value. "count" is the number of occurrences of values that hash to "hash". "proportion" is the proportion of all values considered above that hash to "hash". "value" is one of the values considered above that hash to "hash". Which value is chosen when multiple values hash to the same "hash" is undefined. The list is sorted in descending order by count, yielding the most frequently occurring hashes first.
def build_histogram(iterator, key):
    buckets = defaultdict(int)
    values = {}
    num_objects = 0
    for obj in iterator:
        num_objects += 1
        try:
            val = obj[key]
        except (KeyError, TypeError):
            continue
        value_hash = hashlib.sha1()
        value_hash.update(syaml.dump_config(sort_yaml_obj(val)).encode())
        value_hash = value_hash.hexdigest()
        buckets[value_hash] += 1
        values[value_hash] = val
    return [
        (h, buckets[h], float(buckets[h]) / num_objects, values[h])
        for h in sorted(buckets.keys(), key=lambda k: -buckets[k])
    ]
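As a quick illustration of the behaviour described above, here is a self-contained sketch; json.dumps(..., sort_keys=True) stands in for the YAML-based canonical serialization (syaml.dump_config and sort_yaml_obj) used by the original, an assumption made purely so the example runs on its own.

import hashlib
import json
from collections import defaultdict

def build_histogram_sketch(iterator, key):
    buckets = defaultdict(int)
    values = {}
    num_objects = 0
    for obj in iterator:
        num_objects += 1
        try:
            val = obj[key]
        except (KeyError, TypeError):
            continue
        # Hash a canonical serialization of the value so equal values collide.
        digest = hashlib.sha1(json.dumps(val, sort_keys=True).encode()).hexdigest()
        buckets[digest] += 1
        values[digest] = val
    return [
        (h, buckets[h], buckets[h] / num_objects, values[h])
        for h in sorted(buckets, key=lambda k: -buckets[k])
    ]

mappings = [{"os": "ubuntu"}, {"os": "ubuntu"}, {"os": "centos"}, {"arch": "x86_64"}]
for digest, count, proportion, value in build_histogram_sketch(mappings, "os"):
    print(digest[:8], count, proportion, value)
# The most frequent value ("ubuntu") comes first; proportions are taken over all
# four mappings, including the one that lacks the "os" key.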
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compile_list(hash_map, keys):\n word_counts = [] # initialize empty list for word counts\n\n # iterate through all keys in the set\n for key in keys:\n value = hash_map.get(key) # fetch value for the current key\n word_counts.append((key, value)) # append the tuple formatted (key, value) to the word_counts list\n\n return word_counts", "def histogram(s):\n d = dict()\n for c in s:\n d[c] = 1 + d.get(c, 0)\n return d", "def _getValueCounts(mapping):\n return Counter({k: len(v) for k, v in viewitems(mapping)})", "def test_make_histogram():\n hand = [\n Card(\"Q\", \"D\"),\n Card(\"2\", \"C\"),\n Card(\"T\", \"D\"),\n Card(\"Q\", \"S\"),\n Card(\"A\", \"S\"),\n Card(\"Q\", \"H\"),\n Card(\"3\", \"D\"),\n Card(\"Q\", \"C\"),\n Card(\"2\", \"H\"),\n ]\n expected_histogram = {14: 1, 12: 4, 10: 1, 3: 1, 2: 2}\n assert expected_histogram == make_histogram(hand)", "def covert_dict_to_list_of_value(in_dict):\r\n # need this value to plot the number of histogram bars\r\n number_of_keys = len(in_dict.keys())\r\n output_list = []\r\n key_list = []\r\n max_val = 0\r\n vals_for_bar_chart = []\r\n for key, val in in_dict.items():\r\n key_list.append(key)\r\n for i in range(0, val):\r\n output_list.append(key)\r\n # get the maximum val for graph\r\n if val > max_val:\r\n max_val = val\r\n for i in range(1, max(key_list)+1):\r\n try:\r\n val = in_dict[i]\r\n vals_for_bar_chart.append(val)\r\n except KeyError:\r\n vals_for_bar_chart.append(0)\r\n\r\n return sorted(output_list), number_of_keys, max_val, vals_for_bar_chart", "def fast_histogram(\n val: List[float],\n discrete: Optional[bool] = None,\n bins: Optional[List[float]] = None,\n) -> Mapping[str, float]:\n val = np.asarray(val, dtype=float)\n size = len(val)\n\n # Unique does not work on nan since nan != nan\n val = val[~np.isnan(val)]\n size_nan = size - len(val)\n discrete = is_discrete(val) if discrete is None else discrete\n\n if discrete:\n bins, counts = np.unique(val, return_counts=True)\n bin_to_count = {str(bins[i]): counts[i] for i in range(len(bins))}\n if size_nan > 0:\n bin_to_count[\"nan\"] = size_nan\n return bin_to_count\n\n # Counts nan as part of infinity bin\n val = val[~np.isinf(val)]\n size_inf = size - len(val)\n if len(val) == 0:\n return {\"+Inf\": size_inf}\n\n # Take the negative of all values to use \"le\" as the bin upper bound\n bins = bins or get_bins(val)\n counts, _ = np.histogram(-val, bins=-np.flip([bins[0]] + bins))\n counts = np.flip(counts)\n bin_to_count = dict(p for p in zip(map(str, bins), counts))\n\n # Add infinity bin last to preserve insertion order\n bin_to_count[\"+Inf\"] = size_inf\n return bin_to_count", "def histogram(word_list):\n assert type(word_list) == list\n\n histogram = {}\n total_words = len(word_list)\n word_frac = 1.0/total_words\n\n for word in word_list:\n if word in histogram: histogram[word] += word_frac\n else: histogram[word] = word_frac\n\n return histogram", "def make_histogram(points, bucket_size):\r\n return Counter(bucketize(point, bucket_size) for point in points)", "def make_histogram(points, bucket_size):\n return Counter(bucketize(point, bucket_size) for point in points)", "def make_histogram(points, bucket_size):\n return Counter(bucketize(point, bucket_size) for point in points)", "def count_elements(seq) -> dict:\n hist = {}\n for i in seq:\n hist[i] = hist.get(i, 0) + 1\n return hist", "def count_elements(seq) -> dict:\n hist = {}\n for i in seq:\n hist[i] = hist.get(i, 0) + 1\n return hist", "def compute_frequency_lines(self) -> list:\n freq = 
sorted(self.hash_map.get_key_value_pairs(),\n key=lambda e: e[1], reverse=True)\n result = map(lambda e:\n str(e[0]) + '\\t' + str(e[1][1]) + '\\t' + e[1][0],\n zip(range(1, 501), freq))\n return list(result)", "def counts(self, *vals):\n return ((v, self[self.hash(val)]) for v in vals)", "def generate_histogram(data, buckets):\n if not data:\n return {}\n\n minimum = min(data)\n maximum = max(data)\n if minimum == maximum:\n return {data[0]: len(data)}\n\n buckets = min(len(data), buckets)\n bucket_size = (maximum-minimum)/buckets\n out = dict((i, 0) for i in range(buckets))\n for i in data:\n out[min(int((i-minimum)/bucket_size), buckets-1)] += 1\n return dict(((k*bucket_size)+minimum, v) for k, v in out.items())", "def solution_histogram(s):\n d = dict()\n for c in s:\n d[c] = d.get(c,0) + 1\n return d", "def calc_prob(data):\n total = len(data)\n frequencies = sorted(Counter(data).items())\n probabilities = OrderedDict()\n for (key, value) in frequencies:\n probabilities[key] = value / total\n return probabilities", "def _dict_values_count_hashed(a_dict, count_this):\n counter = 0\n for value in a_dict.values():\n if value == count_this:\n counter += 1\n elif (\n isinstance(value, dict)\n and isinstance(count_this, dict)\n and \"hash\" in value\n and \"hash\" in count_this\n and \"size\" in value\n and \"size\" in count_this\n and value[\"hash\"] == count_this[\"hash\"]\n ):\n counter += 1\n \"hash\" in value and isinstance(count_this, dict) and \"hash\" in count_this\n return counter", "def histogram_bucket_counts(**kwargs):\n attributes_list = [\"ping_type\", \"os\", \"app_version\", \"app_build_id\", \"channel\"]\n metric_attributes_list = [\"metric\", \"metric_type\", \"key\", \"agg_type\"]\n fixed_attributes = [\"app_version\", \"channel\"]\n cubed_attributes = [x for x in attributes_list if x not in fixed_attributes]\n return dict(\n attributes_list=attributes_list,\n attributes=\",\".join(attributes_list),\n cubed_attributes=cubed_attributes,\n attribute_combinations=compute_datacube_groupings(cubed_attributes),\n metric_attributes_list=metric_attributes_list,\n metric_attributes=\",\".join(metric_attributes_list),\n custom_distribution_metadata_list=get_custom_distribution_metadata(\"fenix\"),\n **kwargs,\n )", "def get_letter_frequencies(count_dict):\n total_letter_count = sum(count_dict.values())\n freqs = {}\n for (letter, count) in count_dict.iteritems():\n freqs[letter] = count / total_letter_count\n return freqs", "def counts(li):\n d={}\n for x in li:\n d[x]=d.get(x,0)+1\n return dict(sorted([x,d[x]] for x in d))", "def get_frequencies(img):\n freqs = {}\n\n # assume img is a w by h 2D matrix\n \n (h, w) = img.shape\n size = float(h*w)\n\n # loop through each pixel and \n # save number of times each value appears\n for i in range(0, w):\n for j in range(0, h):\n n = img[j][i]\n if n in freqs:\n freqs[n] += 1\n else:\n freqs[n] = 1\n\n # turn each value into a percentage/frequency\n # rather than absolute number of occurrences\n for key in freqs:\n freqs[key] = freqs[key] / size\n return freqs", "def counts_to_probs(some_dict, num):\n new_d = dict()\n for key in some_dict:\n value = some_dict[key]\n new_d[key] = value/num\n return new_d", "def gen_single_img_histogram(img_descriptors_codebook_pair):\n img_descriptors, codebook = img_descriptors_codebook_pair\n\n # Initially, each image will have a count of 0 for each codeword.\n histogram_of_codewords = [0 for _ in range(len(codebook))]\n\n # Keep track of which descriptor idxs map to which code word.\n 
descriptor_to_codeword_map = [[] for _ in range(len(codebook))]\n map_descriptor = lambda word_idx, descriptor_idx : \\\n descriptor_to_codeword_map[word_idx].append(descriptor_idx)\n\n for idx, descriptor in enumerate(img_descriptors):\n # Step 3.1\n closest_cluster_idx = hp.get_idx_of_1_NN(descriptor, codebook, dist_func=hp.euclidean_distance)\n histogram_of_codewords[closest_cluster_idx] += 1\n\n map_descriptor(closest_cluster_idx, idx)\n\n return histogram_of_codewords, descriptor_to_codeword_map", "def calculate_histograms_from_assignments(self, funcs, bin_size=1):\n result = defaultdict(lambda: defaultdict(\n lambda: Histogram(bin_size)\n ))\n for assignment in self.get_assignment_reader():\n for name, func in funcs.iteritems():\n value = func(assignment)\n if value is None:\n continue\n result[name][assignment.source].add(value)\n return result", "def prob_sum(graph, key):\n return sum(edge_prob(graph, edge, True) for edge in key)", "def compute_empirical_distribution(values):\n distribution = {}\n\n # -------------------------------------------------------------------------\n # YOUR CODE HERE\n #\n for value in values:\n if value not in distribution:\n distribution[value] = 1\n else:\n distribution[value] += 1\n \n total = len(values)\n for v in distribution.keys():\n distribution[v] /= total\n \n\n #\n # END OF YOUR CODE\n # -------------------------------------------------------------------------\n\n return distribution", "def hashIt(*args):\n total = int(0)\n for x in args:\n if isinstance(x, dict):\n for k, key in enumerate(sorted(x.keys())):\n total += hashIt(k, key, x[key])\n elif isinstance(x, (list, tuple)):\n for k, value in enumerate(x):\n total += hashIt(k, value)\n else:\n try:\n thisHash = hash(x)\n except:\n try:\n thisHash = hash(pickle.dumps(x))\n except:\n thisHash = 0\n total += thisHash\n return hash(total)", "def generate_formula_histogram(self):\n\n histogram = dict()\n for element in self.atomelements:\n if element in histogram.keys():\n histogram[element] += 1\n else:\n histogram[element] = 1\n return histogram", "def word_freq(self, word_list):\n hist = {}\n for word in word_list:\n hist[word] = hist.get(word, 0) + 1\n return hist" ]
[ "0.62433976", "0.5874765", "0.5807315", "0.5762578", "0.5741219", "0.567829", "0.5661489", "0.5657134", "0.5650179", "0.5650179", "0.558463", "0.558463", "0.55465764", "0.55321306", "0.5517353", "0.55055034", "0.54166317", "0.5399882", "0.53576636", "0.5314523", "0.53041196", "0.5300376", "0.5286998", "0.5274206", "0.52543104", "0.5243955", "0.52393776", "0.52324855", "0.5221242", "0.5219994" ]
0.8098769
0
Test failure when trying to sign up with an existing email
def test_signup_existing_email(self):
    url = '/0/chefs'
    data = {
        'email': self.user.email,
        'password': 'secret',
        'name': 'John',
        'surname': 'Doe',
        'language': 'es',
    }
    resp = self.client.post(url, data=data)
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(resp.data['code'], 400)
    self.assertEqual(resp.data['message'], 'Invalid parameters')
    self.assertIn('raw', resp.data)
    self.assertEqual(len(resp.data['raw']), 1)
    self.assertEqual(resp.data['raw'][0]['field'], 'email')
    self.assertIn('message', resp.data['raw'][0])
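The test only exercises the HTTP surface; below is a minimal sketch of the serializer-side check such a test implies, assuming Django REST Framework. The serializer shown here is illustrative, not the project's actual implementation.

from django.contrib.auth import get_user_model
from rest_framework import serializers

class SignupSerializer(serializers.Serializer):
    email = serializers.EmailField()
    password = serializers.CharField(required=False)
    name = serializers.CharField()
    surname = serializers.CharField(required=False)
    language = serializers.CharField()

    def validate_email(self, value):
        # Reusing an existing account's email is what turns the signup into a
        # 400 response carrying a field-level error on "email".
        if get_user_model().objects.filter(email__iexact=value).exists():
            raise serializers.ValidationError("Email already registered")
        return value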
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_signup_missing_email(self):\n\n invalid_u = User.signup(None, \"testuser\", \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_register_existing_email(self):\n response = self.client.post('/api/v2/auth/signup',\n data=json.dumps(users[0]),\n headers=self.admin_headers,\n content_type='application/json')\n self.assertEqual(response.status_code, 409)\n self.assertIn('user with email already registred', str(response.data))", "def test_create_account_failed_existing_email(self):\n data = self.user_data.copy()\n data['email'] = '[email protected]'\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data.get('message').get('email')[0], 'user with this email already exists.')", "def test_registeration_invalid_email(self):\n response = self.signup_a_user(self.user_invalid_email)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data[\"errors\"][\"email\"],\n [\"Enter a valid email address.\"]\n )\n self.assertNotIn(\"token\", response.data)", "def test_user_existing_email(self):\n data = json.dumps({\n \"username\" : \"john\", \"email\" : \"[email protected]\",\n \"password\" : \"secret12345\", \"confirm_password\" : \"secret12345\"})\n res = self.app.post( # pylint: disable=W0612\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def test_signup_with_invalid_email_false(self):\n user = {\n \"Email\": \"user.com\",\n \"Password\": \"pass1234\",\n \"Confirm Password\": \"pass1234\"\n }\n res = self.client().post('/api/v1/auth/signup', data=user)\n self.assertEqual(res.status_code, 400)\n res = res.get_json()\n self.assertEqual(res['error'][0],\n 'Invalid Email Address')", "def testEmailAlreadyThere(self):\r\n res = self.app.post(\r\n '/signup_process',\r\n params={\r\n 'email': '[email protected]'\r\n }\r\n )\r\n self.assertIn('already signed up', res.body)", "def test_create_account_failed_invalid_email(self):\n data = self.user_data.copy()\n data['email'] = 'test'\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data.get('message').get('email')[0], 'Enter a valid email address.')", "def test_register_email_exist(self, test_client):\n response = test_client.post('/api/auth/register', json=dict(\n email=\"[email protected]\",\n username=\"test\",\n password=\"goodPassword!123\"\n ))\n res = json.loads(response.data)\n\n assert response.status_code == 400\n assert res['status'] == False", "def test_duplicate_email(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n rv = self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.assertIn(b'Sorry email already exist', rv.data)", "def test_create_no_email(self):\n\n self.portal.portal_properties.site_properties.use_email_as_login = True\n\n self.assertRaises(\n ValueError,\n api.user.create,\n username='chuck', password='secret'\n )", "def test_empty_email_field(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\")\r\n 
self.assertEqual(2,result,\"Fill in the email field please\")", "def test_private_create_user_without_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))", "def test_already_registered_email_validation(self):\n\n main_page = pages.mainpage.MainPage(self.driver)\n main_page.click_sign_in_button()\n \n sign_in_page = pages.signinpage.SignInPage(self.driver)\n sign_in_page.enter_create_account_email_addres('[email protected]')\n sign_in_page.click_create_account_button() \n\n self.assertTrue(sign_in_page.check_if_account_create_error_is_visible(), 'Email validation failed')", "def test_register_invalid_email(self):\n response = self.client.post('/api/v2/auth/signup',\n data=json.dumps(users[3]),\n headers=self.admin_headers,\n content_type='application/json')\n self.assertEqual(response.status_code, 422)\n self.assertIn('invalid email', str(response.data))", "def test_create_account_failed_no_email(self):\n data = self.user_data.copy()\n data.pop('email')\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data.get('message').get('email')[0], 'This field is required.')", "def test_create_user_with_existing_email(client, existing_user):\n response = client.post(\"/auth/register\", json=existing_user)\n assert response.status_code == 400", "def test_valid_email(self):\n response = self.client().post('/api/v1/auth/signup', data=self.user_data_3)\n self.assertEqual(response.status_code, 400)\n # return result in json format\n result = json.loads(response.data.decode())\n self.assertEqual(\n result[\"message\"], \"Invalid email address.\")", "def test_registeration_no_email(self):\n response = self.signup_a_user(self.user_lacks_email)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data[\"errors\"][\"email\"],\n [\"This field may not be blank.\"]\n )\n self.assertNotIn(\"token\", response.data)", "def test_signup_dupe_email(self):\n\n invalid_u = User.signup(\"[email protected]\", \"testuser\", \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_invalid_email_when_logging_in(self):\n pass", "def testEmailRequired(self):\r\n res = self.app.post('/signup_process')\r\n self.assertIn('Please supply', res.body)", "def test_create_use_with_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, password='open@123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\"\", \"test42837492374923749\")", "def test_user_with_invalid_email_registration(self):\n login = self.client.post(\"api/v2/login\",content_type = 'application/json',data = json.dumps(self.admin_login))\n created_token = json.loads(login.data.decode())[\"token\"]\n new_user = self.client.post('/api/v2/signup',data = json.dumps(self.register_user_without_valid_email),content_type = 'application/json',headers =dict(Authorization = \"Bearer{}\".format(created_token))) \n result = json.loads(new_user.data.decode())\n self.assertEqual(result ['message'], \"enter valid email\")\n self.assertEqual(new_user.status_code, 400)", "def test_signup_invalid_email(self):\n \n user = {\n 'firstname' : 'Caleb',\n 'lastname' : 'Mbugua',\n 'username' : 
'MbuguaCaleb',\n 'email' : 'mbuguac',\n 'password' : '123456',\n 'phone_number' : '0704699193'\n }\n\n \n response = self.client.post('/api/v1/register', json=user, headers={'Content-Type': 'application/json'})\n data = response.get_json()\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data['status'], 400)\n self.assertEqual(data['message'], 'Invalid data. Please fill all the required fields')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, \"test123\")", "def test_create_new_user_duplicate_email(self):\n\n data = {\n 'username': 'John',\n 'email': '[email protected]',\n 'password': 'test123!',\n 'phone': '1234567890',\n 'first_name': 'Chuck',\n 'last_name': 'Norris',\n 'university': {\n \"name\": \"random_university\"\n },\n 'academic_field': {'name': \"random_field\"},\n 'academic_level': {'name': \"random_level\"},\n 'gender': \"M\",\n 'birthdate': \"1999-11-11\",\n }\n\n user = UserFactory()\n user.email = '[email protected]'\n user.save()\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n content = {\n 'email': [\n \"An account for the specified email address already exists.\"\n ]\n }\n self.assertEqual(json.loads(response.content), content)", "def test_existing_email(self):\n response = self.client.post(\n self.reset_password_url, {\"email\": \"[email protected]\"}, format='json')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertEqual(response.data['detail'], \"Not found.\")", "def test_new_user_invalid_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')" ]
[ "0.8121672", "0.8100355", "0.79914504", "0.7966945", "0.79290634", "0.78825", "0.78538597", "0.78521883", "0.7835272", "0.78132766", "0.7812779", "0.77946997", "0.7790612", "0.77850413", "0.7742307", "0.7736527", "0.77243173", "0.7717619", "0.7701845", "0.7695915", "0.76853174", "0.7659916", "0.76536584", "0.76508266", "0.7628461", "0.76107055", "0.7598267", "0.75952893", "0.75940853", "0.75915974" ]
0.8455592
0
Test failure when sending invalid params at signup
def test_signup_invalid_params(self):
    url = '/0/chefs'
    # No data
    data = {}
    resp = self.client.post(url, data=data)
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(resp.data['code'], 400)
    self.assertEqual(resp.data['message'], 'Invalid parameters')
    self.assertIn('raw', resp.data)
    error_keys = [e['field'] for e in resp.data['raw'] if 'field' in e]
    self.assertEqual(set(['email', 'name', 'language']), set(error_keys))
    # Everything but password or fb_access_token
    data = {
        'email': '[email protected]',
        'name': 'John',
        'surname': 'Doe',
        'language': 'es',
    }
    resp = self.client.post(url, data=data)
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(resp.data['code'], 400)
    self.assertEqual(resp.data['message'], 'Invalid parameters')
    self.assertEqual(len(resp.data['raw']), 1)
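This test asserts on a specific error payload shape; the sketch below shows one way a view could fold serializer errors into the {"code", "message", "raw"} structure it expects, again assuming Django REST Framework and using illustrative names only.

from rest_framework import status
from rest_framework.response import Response

def invalid_parameters_response(serializer):
    # One entry per failing field, mirroring the "raw" list the tests inspect.
    raw = [
        {"field": field, "message": messages[0]}
        for field, messages in serializer.errors.items()
    ]
    return Response(
        {"code": 400, "message": "Invalid parameters", "raw": raw},
        status=status.HTTP_400_BAD_REQUEST,
    )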
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_user_registration_fails_for_missing_parameters(self):\n resp = self.test_client.post(\"/api/v1/auth/register\")\n self.assertEqual(resp.status_code, 400)\n data = json.loads(resp.data)\n self.assertEqual(\n data[\"message\"],\n \"you need to enter both the email and the password\")\n self.assertEqual(data[\"status\"], \"failure\")", "def test_register_without_data(self):\n response = self.client.post('/api/v2/auth/signup',\n data=json.dumps(users[4]),\n headers=self.admin_headers,\n content_type='application/json')\n self.assertEqual(response.status_code, 400)\n self.assertIn('Missing required parameter', str(response.data))", "def test_signup_when_no_data_provied(self):\n response = self.client.post('/api/v1/register')\n data = response.get_json()\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data['status'], 400)\n self.assertEqual(data['message'], 'No Sign up data provided')", "def test_signup_when_empty_data_provided(self):\n user = {}\n\n response = self.client.post('/api/v1/register', json=json.dumps(user), headers={'Content-Type': 'application/json'})\n data = response.get_json()\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data['status'], 400)\n self.assertEqual(data['message'], 'Invalid data. Please fill all the required fields')", "def test_signup_when_there_are_missing_fields(self):\n user = {\n 'firstname' : 'Caleb',\n 'lastname' : 'Mbugua',\n 'password' : '12345566'\n }\n\n response = self.client.post('/api/v1/register', json=user, headers={'Content-Type': 'application/json'})\n data = response.get_json()\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data['status'], 400)\n self.assertEqual(data['message'], 'Invalid data. Please fill all the required fields')", "def test_wrong_data_signup(self):\n user = {\n 'firstname': 1,\n 'lastname': 'Mbugua',\n 'othername': 'Colmike',\n 'email': '[email protected]',\n 'username': \"Mikeymike\",\n 'password': 'mikemike',\n 'phoneNumber': '0708453910'\n }\n\n res = self.client.post('api/v2/auth/signup', json=user,\n headers={'Content-Type': 'application/json'})\n data = res.get_json()\n self.assertEqual(res.status_code, 400)\n self.assertEqual(\n data['message'], 'Invalid data. 
Please fill all required fields')", "def test_user_wrong_registration(self):\n response = self.client.post(SIGNUP_URL,\n data=json.dumps(\n {'username': 'danny', 'email': '[email protected]', 'password': ''}),\n content_type='application/json')\n self.assertEqual(response.status_code, 400)\n result = json.loads(response.data.decode())\n self.assertEqual(result[\"message\"], \"All fields are required\")", "def test_sign_up_user(self):\n\n # test successful registration\n \n payload = self.post_req()\n self.assertEqual(payload.json['status'], 201)\n self.assertTrue(payload.json['auth_token'])\n self.assertEqual(payload.json['message'], \"[email protected] registered successfully\")\n\n # test missing fields\n user = {\n \"last_name\" : \"Mwangi\",\n \"email\" : \"[email protected]\",\n \"username\" : \"jjj\",\n \"image\": \"\",\n \"password\": \"abc123@1A\",\n \"confirm_password\": \"abc123@1A\"\n }\n payload2 = self.post_req(data=user)\n self.assertEqual(payload2.status_code, 400)\n self.assertEqual(payload2.json['error'], 'You missed the first_name key, value pair')\n\n # test invalid data\n user2 = {**self.user_item}\n user2['first_name'] = \"1234214\"\n payload3 = self.post_req(data=user2)\n self.assertEqual(payload3.status_code, 422)\n self.assertEqual(payload3.json['error'], 'please enter valid first name!')", "def test_signup_invalid_password_provided(self):\n\n user = {\n 'firstname' : 'Caleb',\n 'lastname' : 'Mbugua',\n 'username' : 'MbuguaCaleb',\n 'email' : '[email protected]',\n 'password' : '123456',\n 'phone_number' : '0704699193'\n }\n\n response = self.client.post('/api/v1/register', json=user, headers={'Content-Type': 'application/json'})\n data = response.get_json()\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data['status'], 400)\n self.assertEqual(data['message'], 'Invalid data. 
Please fill all the required fields')", "def signup():", "def test_signup_noUsername(self):\n user = {\n 'firstname': 'Michael',\n 'lastname': 'Mbugua',\n 'othername': 'Colmike',\n 'username': None,\n 'email': '[email protected]',\n 'password': 'mikemike',\n 'phoneNumber': '0708453910'\n }\n\n\n res = self.client.post('api/v2/auth/signup', json=user,\n headers={'Content-Type': 'application/json'})\n\n data = res.get_json()\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['status'], 400)", "def test_repeat_signup(self):\n user = {\n\t \"firstname\": \"Maxwel\",\n\t \"lastname\": \"Thumi\",\n \"othername\": \"Max\",\n \"email\": \"[email protected]\",\n \"phoneNumber\": \"0796741644\",\n \"username\": \"MaxT\",\n \"password\": \"S1rMaxx\"\n \n }\n self.client.post('api/v2/auth/signup', json=user,\n headers={'Content-Type': 'application/json'})\n res = self.client.post('api/v2/auth/signup', json=user,\n headers={'Content-Type': 'application/json'})\n data = res.get_json()\n self.assertEqual(res.status_code, 403)\n self.assertEqual(data['status'], 403)\n self.assertEqual(data['message'], 'User Already exists')", "def test_signup_missing_password(self):\n with self.assertRaises(ValueError) as context:\n invalid_u = User.signup(\"[email protected]\", \"testuser\", None, \"Test\", \"User\", None)", "def test_multiple_signup_false(self):\n res = self.client().post('/api/v1/auth/signup', data=self.user)\n self.assertEqual(res.status_code, 201)\n res = self.client().post('/api/v1/auth/signup', data=self.user)\n self.assertEqual(res.status_code, 400)\n res = res.get_json()\n self.assertEqual(res['error'][0], 'That email is already taken')", "def test_user_signup_with_invalid_first_name(self):\n pass", "def test_0050_registration_post_1(self):\n response = self.fetch(\n '/registration', method=\"POST\", follow_redirects=False,\n body=urlencode({'name':'anoop', 'email':'[email protected]',\n 'password':'openlabs', 'confirm_password':'wrong'}\n )\n )\n self.assertEqual(response.code, 200)\n self.assertEqual(\n response.body.count(\n u'There were error(s) in processing your registration.'\n ), 1\n )", "def test_no_email(self):\n response = self.register({\n 'first_name': \"David\",\n 'last_name': \"Smith\",\n 'password': \"******\",\n 'phone_number': \"012-345-6789\"\n })\n self.assertEqual(response.status_code, 400)\n self.assertDictContainsSubset({'message': \"Missing parameters\"}, response.json())", "def test_register(self):\n # Test no username\n data = {'password': 'pwd', 'email': 'Josh'}\n data = json.dumps(data)\n response = self.client.post(\n 'api/v2/auth/signup', content_type=\"application/json\", data=data)\n data = json.loads(response.data)\n self.assertEqual(data, {message: 'Username not provided'})\n self.assertEqual(response.status_code, 400)\n # Test no password\n data = {'username': 'admin', 'email': '[email protected]'}\n data = json.dumps(data)\n response = self.client.post(\n 'api/v2/auth/signup', content_type=\"application/json\", data=data)\n data = json.loads(response.data)\n self.assertEqual(data, {message: 'Password not provided'})\n self.assertEqual(response.status_code, 400)\n # Test no email\n data = {'username': 'admin', 'password': '[email protected]'}\n data = json.dumps(data)\n response = self.client.post(\n 'api/v2/auth/signup', content_type=\"application/json\", data=data)\n data = json.loads(response.data)\n self.assertEqual(data, {message: 'Email not provided'})\n self.assertEqual(response.status_code, 400)\n # Test with wrong format of auth/login 
credentials\n data = ['Bad', 'data', 'format']\n data = json.dumps(data)\n response = self.client.post(\n 'api/v2/auth/signup', content_type=\"application/json\", data=data)\n data = json.loads(response.data)\n self.assertEqual(data, {message: 'Invalid data format'})\n self.assertEqual(response.status_code, 400)\n # Register bad email\n data = mock_data['bad_email_r']\n data = json.dumps(data)\n response = self.client.post(\n 'api/v2/auth/signup', content_type=\"application/json\", data=data)\n data = json.loads(response.data)\n self.assertEqual(data['message'], 'Email invalid')\n self.assertEqual(response.status_code, 400)\n # Test bad usernsmr\n data = mock_data['bad_usern_r']\n data = json.dumps(data)\n response = self.client.post(\n 'api/v2/auth/signup', content_type=\"application/json\", data=data)\n data = json.loads(response.data)\n self.assertEqual(\n data['message'], \"Username cannot be less than four characters\")\n self.assertEqual(response.status_code, 400)", "def test_signup_invalid_email(self):\n \n user = {\n 'firstname' : 'Caleb',\n 'lastname' : 'Mbugua',\n 'username' : 'MbuguaCaleb',\n 'email' : 'mbuguac',\n 'password' : '123456',\n 'phone_number' : '0704699193'\n }\n\n \n response = self.client.post('/api/v1/register', json=user, headers={'Content-Type': 'application/json'})\n data = response.get_json()\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data['status'], 400)\n self.assertEqual(data['message'], 'Invalid data. Please fill all the required fields')", "def test_signup_with_invalid_email_false(self):\n user = {\n \"Email\": \"user.com\",\n \"Password\": \"pass1234\",\n \"Confirm Password\": \"pass1234\"\n }\n res = self.client().post('/api/v1/auth/signup', data=user)\n self.assertEqual(res.status_code, 400)\n res = res.get_json()\n self.assertEqual(res['error'][0],\n 'Invalid Email Address')", "def test_signup(self):\n response = self.client.post('/api/v1/auth/signup',\n data=json.dumps(self.new_user1),\n content_type='application/json')\n self.assertEqual(response.status_code, 201)", "def test_user_sign_up_success(self):\n res = self.client.post(reverse('sign_up'), data={\n 'username': '[email protected]',\n 'first_name': 'Test',\n 'last_name': 'User',\n 'password1': PASSWORD,\n ''\n })", "def test_no_password(self):\n response = self.register({\n 'first_name': \"David\",\n 'last_name': \"Smith\",\n 'email': \"[email protected]\",\n 'phone_number': \"012-345-6789\"\n })\n self.assertEqual(response.status_code, 400)\n self.assertDictContainsSubset({'message': \"Missing parameters\"}, response.json())", "def test_signup_view(self, user_data, client: Client):\n response = client.post(\n reverse_lazy(\"users:signup\"),\n {\n \"username\": user_data.email,\n \"email\": user_data.email,\n \"password1\": user_data.password,\n \"password2\": user_data.password,\n },\n content_type=\"application/x-www-form-urlencoded\",\n )\n assert response.status_code == 200", "def test_signup_with_blank_email_false(self):\n user = {\n \"Email\": \"\",\n \"Password\": \"pass1234\",\n \"Confirm Password\": \"pass5678\"\n }\n res = self.client().post('/api/v1/auth/signup', data=user)\n self.assertEqual(res.status_code, 400)\n res = res.get_json()\n self.assertEqual(res['error'][0],\n 'Email should not be blank')", "def test_signup(self):\n res = self.client.get(\"/registration\")\n data = res.data.decode(\"utf-8\")\n assert res.status == \"200 OK\"\n assert \"Create Account\" in data", "def test_sign_up(self):\n\n with self.client as client:\n \n # testing a valid 
signup\n response = client.post(\n '/signup',\n data = {\n \"username\" : \"testuser3\",\n \"password\" : \"password\",\n \"email\" : \"[email protected]\",\n \"image_url\" : \"http://test.jpeg\"\n },\n follow_redirects=False)\n \n self.assertEqual(response.status_code, 302)\n \n #separate this as different test\n client.post(\n '/signup',\n data = {\n \"username\" : \"testuser3\",\n \"password\" : \"password\",\n \"email\" : \"[email protected]\",\n \"image_url\" : \"http://test.jpeg\"\n },\n follow_redirects=False)\n\n self.assertIn(\"Username already taken\", get_flashed_messages())", "def test_case2(self):\n\n valid_email = \"[email protected]\"\n valid_password = \"asdasdasd\"\n valid_firstname = \"name1\"\n valid_familyname = \"name2\"\n valid_gender = \"Female\"\n valid_city = \"Testcity\"\n valid_country = \"Testcountry\"\n\n # Valid sign up\n response = test_sign_up(valid_email, valid_password, valid_firstname, valid_familyname, valid_gender, valid_city,\n valid_country)\n assert response.status_code == 200\n sign_up_json = json.loads(response.text)\n self.assertEqual(sign_up_json[\"success\"], True, sign_up_json[\"message\"])\n\n email = \"[email protected]\"\n password = \"asdasdasd\"\n first_name = \"name1\"\n family_name = \"name2\"\n gender = \"Male\"\n city = \"Testcity\"\n country = \"Testcountry\"\n\n invalid_password = \"invalid_password\"\n unknown_email = \"[email protected]\"\n\n\n # Sign up with missing input\n response = test_sign_up(email, password, \"\", family_name, gender, city, country)\n assert response.status_code == 200\n sign_up_json = json.loads(response.text)\n self.assertEqual(sign_up_json[\"success\"], False, sign_up_json[\"message\"])\n self.assertEqual(sign_up_json[\"message\"], BAD_SIGNUP_ERROR_MSG, \"Wrong error message.\")\n\n # Sign up with too short password\n response = test_sign_up(email, \"short\", first_name, family_name, gender, city, country)\n assert response.status_code == 200\n sign_up_json = json.loads(response.text)\n self.assertEqual(sign_up_json[\"success\"], False, sign_up_json[\"message\"])\n self.assertEqual(sign_up_json[\"message\"], BAD_SIGNUP_ERROR_MSG, \"Wrong error message.\")\n\n # Sign up with already existing email\n response = test_sign_up(valid_email, password, first_name, family_name, gender, city, country)\n assert response.status_code == 200\n sign_up_json = json.loads(response.text)\n self.assertEqual(sign_up_json[\"success\"], False, sign_up_json[\"message\"])\n self.assertEqual(sign_up_json[\"message\"], EMAIL_IN_USE_ERROR_MSG, \"Wrong error message.\")\n\n\n # Sign in with unknown email\n response = test_sign_in(unknown_email, password)\n assert response.status_code == 200\n sign_in_json = json.loads(response.text)\n self.assertEqual(sign_in_json[\"success\"], False, sign_in_json[\"message\"])\n self.assertEqual(sign_in_json[\"message\"], BAD_LOGIN_ERROR_MSG, \"Wrong error message.\")\n\n # Sign in with invalid password\n response = test_sign_in(valid_email, invalid_password)\n assert response.status_code == 200\n sign_in_json = json.loads(response.text)\n self.assertEqual(sign_in_json[\"success\"], False, sign_in_json[\"message\"])\n self.assertEqual(sign_in_json[\"message\"], BAD_LOGIN_ERROR_MSG, \"Wrong error message.\")", "def test_sign_in_no_email(self):\n response = self.client.post(reverse('backend:sign_in'), {'password': '******'})\n self.assertEqual(response.status_code, 400)\n self.assertDictContainsSubset({'status': \"failure\"}, response.json())\n self.assertDictContainsSubset({'message': \"Missing 
parameters\"}, response.json())", "def test_signup(self):\n response = self.client.post('/user/', {\n 'username': 'aseem123', 'password': 'passwrodaosida123'\n })\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)" ]
[ "0.7844327", "0.77811277", "0.7775406", "0.76737154", "0.7640591", "0.75956047", "0.74659085", "0.73942053", "0.7378038", "0.7337052", "0.73232025", "0.73102736", "0.73099387", "0.7291879", "0.7289674", "0.72811973", "0.7267153", "0.7262471", "0.72411805", "0.7226106", "0.7221622", "0.720397", "0.72015697", "0.71845007", "0.71494055", "0.7143722", "0.7137473", "0.7126134", "0.7109231", "0.71027416" ]
0.7986197
0
Test get self Chef
def test_get_self(self):
    url = '/0/chefs'
    resp = self.client.get(url)
    self.assertPermissionDenied(resp)
    headers = self.login()
    resp = self.client.get(url, **headers)
    self.assertEqual(resp.status_code, 200)
    self.assertIn('chef', resp.data)
    self.assertEqual(self.CHEF_VIEW_KEYS - set(['followed']), set(resp.data['chef'].keys()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_system(self):\n pass", "def test_get_run(self):\n pass", "def test_get_recipe_information(self):\n pass", "def test_get_commands(self):\n self.installer.os = \"amazonLinux\"\n commands = self.installer._get_commands()\n assert len(commands) > 0\n self.installer.os = \"ubuntu\"\n commands = self.installer._get_commands()\n assert len(commands) > 0\n self.installer.os = \"does_not_exist\"\n commands = self.installer._get_commands()\n self.assertEqual(len(commands), 1)", "def test_get_software(self):\n pass", "def _test_cli_package(self):\n self.keystone_url = self.bootstrap_inputs['keystone_url']\n iaas_resolver = '{0} {1}' \\\n .format(*self._resolve_url_to_ip_and_netloc(self.keystone_url))\n iaas_resolver_cmd = 'echo {0} >> /etc/hosts'.format(iaas_resolver)\n\n # Make sure cli machine is up with a registered ssh key\n wait_for_vm_to_become_ssh_available(env, self._execute_command,\n self.logger)\n\n with self.dns():\n self.logger.info('Preparing CLI and downloading example')\n package_name = self._prepare_cli()\n blueprint_path = self.get_hello_world_blueprint()\n\n self._install_cli(package_name)\n self.logger.info('Preparing manager blueprint')\n self.prepare_manager_blueprint()\n self._update_hosts_file(iaas_resolver)\n\n # Getting the remote manager blueprint and preparing resources\n self.logger.info('Retrieving remote manager blueprint file...')\n manager_blueprint = StringIO()\n fab.get(self.test_manager_blueprint_path, manager_blueprint)\n manager_blueprint_yaml = yaml.load(manager_blueprint.getvalue())\n resources_to_download = self._get_resource_list(manager_blueprint_yaml)\n\n # each os should implement any vm-related function before this comment\n\n with FileServer(self._get_file_server_inputs(), resources_to_download,\n FILE_SERVER_PORT, self.logger) as fs:\n additional_inputs = fs.get_processed_inputs()\n self._update_inputs_file(additional_inputs)\n\n self.logger.info('Bootstrapping...')\n self.bootstrap_manager()\n\n # Adding iaas resolver for the manager machine.\n self.logger.info('adding {0} to /etc/hosts of the manager vm'\n .format(iaas_resolver))\n manager_fab_conf = {\n 'user': self.client_user,\n 'key_filename': self._get_manager_kp(),\n 'host_string': self.manager_ip,\n 'timeout': 30,\n 'connection_attempts': 10\n }\n wait_for_vm_to_become_ssh_available(manager_fab_conf,\n self._execute_command,\n self.logger)\n self._run_cmd_on_custom_machine(iaas_resolver_cmd,\n manager_fab_conf, sudo=True)\n\n # Uploading, deploying and testing hello_world blueprint\n # Any sleep is to allow the execution to complete\n # TODO: remove this line when the openstack sg description fix is applied # NOQA\n self._update_example_sg()\n\n self.logger.info('Testing the example deployment cycle...')\n blueprint_id = 'blueprint-{0}'.format(uuid.uuid4())\n\n self._upload_blueprint(blueprint_path, blueprint_id,\n self.app_blueprint_file)\n self.deployment_id = self.create_deployment(blueprint_id)\n self.addCleanup(self.uninstall_deployment)\n self.install_deployment(self.deployment_id)\n self.assert_deployment_working(\n self._get_app_property('http_endpoint'))", "def test_gettem_using_get(self):\n pass", "def provision():\n sudo('chef-client')", "def test_get_client(self):\n pass", "def test_get(self):\n pass", "def test_create_get(self):\n self.shell.onecmd(\"create %s/one 'hello'\" % (self.tests_path))\n self.shell.onecmd(\"get %s/one\" % (self.tests_path))\n self.assertEqual(\"hello\\n\", self.output.getvalue())", "def test_get_virtual_service(self):\n pass", "def 
test_health_get(self):\n pass", "def test_get_random_recipes(self):\n pass", "def test_get_food(self):\n pass", "def test_get(self):\n mock = MagicMock(\n return_value={\"retcode\": 0, \"stderr\": \"error\", \"stdout\": \"salt\"}\n )\n with patch.dict(openstack_config.__salt__, {\"cmd.run_all\": mock}):\n self.assertEqual(\n openstack_config.get(\"/etc/keystone/keys.conf\", \"sql\", \"connection\"),\n \"salt\",\n )\n\n mock = MagicMock(\n return_value={\"retcode\": 1, \"stderr\": \"error\", \"stdout\": \"salt\"}\n )\n with patch.dict(openstack_config.__salt__, {\"cmd.run_all\": mock}):\n self.assertRaises(\n CommandExecutionError,\n openstack_config.get,\n \"/etc/key/keystone.conf\",\n \"sql\",\n \"connection\",\n )", "def test_orchestrator_http_simple(self):\n pass", "def test_virtualservice_get(self):\n pass", "def init():\n\n @click.command('sysapps')\n def check_sysapps():\n \"\"\"Checkout system apps health.\"\"\"\n return sysapps.test\n\n return check_sysapps", "def test_get_by_name(self):\n actual = chef_role.get_by_name(self.role_name)\n eq_(actual['chef_role_id'], self.role_id)", "def test_for_client():", "def test_client_retrieve(self):\n pass", "def test_05_get(self, mock_readall, mock_config, mock_verks):\n self._init()\n udocker.Config = mock_config\n udocker.Config.tmpdir = \"/tmp\"\n mock_readall.return_value = self.credentials\n kstore = udocker.KeyStore(\"filename\")\n self.assertTrue(kstore.get(self.url))\n self.assertFalse(kstore.get(\"NOT EXISTING ENTRY\"))", "def test_get1(self):\n pass", "def main() -> None:\n\n user = demisto.params().get(\"credentials\", {}).get(\"identifier\")\n password = demisto.params().get(\"credentials\", {}).get(\"password\")\n\n base_url = urljoin(demisto.params()[\"url\"], \"/Konfigurator/REST\")\n verify_certificate = not demisto.params().get(\"insecure\", False)\n proxy = demisto.params().get(\"proxy\", False)\n headers = {\"Content-Type\": \"application/mwg+xml\"}\n\n command = demisto.command()\n\n demisto.debug(f\"Command being called is {command}\")\n\n try:\n with Client(\n username=user,\n password=password,\n base_url=base_url,\n verify=verify_certificate,\n headers=headers,\n proxy=proxy,\n ) as client:\n commands = {\n \"test-module\": test_module,\n \"swg-get-available-lists\": get_lists_command,\n \"swg-get-list\": get_list_command,\n \"swg-get-list-entry\": get_list_entry_command,\n \"swg-modify-list\": modify_list_command,\n \"swg-insert-entry\": insert_entry_command,\n \"swg-delete-entry\": delete_entry_command,\n \"swg-create-list\": create_list_command,\n \"swg-delete-list\": delete_list_command,\n }\n if command not in commands:\n raise NotImplementedError(f'Command {command} was not implemented.')\n return_results(commands[command](client, demisto.args()))\n except Exception as e:\n return_error(f'Failed to execute {command} command.\\nError:\\n{str(e)}')", "def test_basic():\n client = TestClient()\n client.run(\"config get\")\n assert \"default_profile = default\" in client.out\n assert \"path = ./data\" in client.out", "def test_get_requests(self):\n response = self.client.open('/api/provisioning/port',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_get_systems(self):\n pass", "def test_set_get(self):\n self.shell.onecmd(\"create %s/one 'hello'\" % (self.tests_path))\n self.shell.onecmd(\"set %s/one 'bye'\" % (self.tests_path))\n self.shell.onecmd(\"get %s/one\" % (self.tests_path))\n self.assertEqual(\"bye\\n\", self.output.getvalue())", "def cli(ctx):" 
]
[ "0.6476642", "0.634032", "0.6107374", "0.60835195", "0.6045064", "0.6035675", "0.60203016", "0.60191286", "0.5964071", "0.58675206", "0.5860166", "0.5804437", "0.5799616", "0.5771165", "0.57573444", "0.57526183", "0.57388955", "0.5721284", "0.57191247", "0.57172894", "0.5679127", "0.5668057", "0.56545687", "0.56423974", "0.56386626", "0.56294936", "0.56294495", "0.56139594", "0.5612898", "0.56110096" ]
0.6809186
0
Test get Chef with photo
def test_get_chef_with_photo(self):
    url = '/0/chefs/' + str(self.user.pk)
    headers = self.login()
    resp = self.client.get(url, **headers)
    self.assertEqual(resp.status_code, 200)
    self.assertIn('chef', resp.data)
    self.assertNotIn('photo', resp.data['chef'])
    self.user.avatar_photos.create(s3_url='image')  # Create photo
    resp = self.client.get(url, **headers)
    self.assertEqual(resp.status_code, 200)
    self.assertIn('chef', resp.data)
    self.assertIn('photo', resp.data['chef'])
    keys = set(('id', 'url', 'creation_date', 'edit_date'))
    self.assertEqual(keys, set(resp.data['chef']['photo']))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_photos(self):\n recipe = Recipes.objects.create(chef=self.user, draft=False, private=False)\n photo = Photos.objects.create(recipe=recipe, photo_order=1)\n\n url = '/0/chefs/%i/photos' % self.user.pk\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('photos', resp.data)\n self.assertEqual(1, len(resp.data['photos']))\n keys = ('edit_date', 'creation_date', 'id', u'temperature', 'url', 'recipe', 'cover',\n 'time', 'instructions', 'order', 'quantity')\n self.assertEqual(set(keys), set(resp.data['photos'][0].keys()))", "def test_get_ao_image(self):\n response = self.client.open(\n '/rui-support/ao-image',\n method='GET',\n content_type='application/ld+json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_aws_service_api_image_get(self):\n pass", "def test_image_display(self):\n\n result = self.client.get(\"/select_image\")\n\n self.assertIn(b\"/static/uploads/girl-glowing-skin-blue-eyes.jpg\", result.data)", "def testimage_handler(self):\n\t\t\n\t\tthings = Thing.objects.all()\n\t\tif len( things ):\n\t\t\tthing = things[0]\n\t\telse:\n\t\t\tc = Client()\n\t\t\tdata = parse_qs( 'title=&tags=&lattitude=32.82248&longitude=-96.762986&duration=&parent=&privacy=U&lifespan=&format=txt' )\n\t\t\tdata[ 'media' ] = open( MEDIA_ROOT + 'unittest_image.jpg' )\n\t\t\tc.post( '/api/place/', data )\n\t\t\t\n\t\t\tthing = Thing.objects.all()[0]\n\n\t\t\n\t\turi = thing.media.replace( 'http://' + DOMAIN, '' )\n\t\t\n\t\tc = Client()\n\t\tresponse = c.get( uri )\n\t\tself.failUnlessEqual(response.status_code, 200)", "def test_read_image(self):\n pass", "def test_upload_image_to_recipe(self):\n url = image_upload_url(self.recipe.id)\n\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB',(10,10))\n img.save(ntf,format='JPEG')\n ntf.seek(0)\n res = self.client.patch(url,{'image':ntf},format='multipart')\n # print(res.data)\n self.recipe.refresh_from_db()\n self.assertIn('image', res.data)\n self.assertEqual(res.status_code,status.HTTP_200_OK)\n\n self.assertTrue(os.path.exists(self.recipe.image.path))", "def test_retrieval_of_user_photos(self):\t\n\t\tget_response = self.client.get(reverse('photos'))\n\n\t\tself.assertEqual(get_response.status_code, status.HTTP_200_OK)\n\t\tdata = [i.values() for i in get_response.data]\n\t\tself.assertIn(u'{}'.format(self.image_name), data[0])", "def test_aws_service_api_public_image_get(self):\n pass", "def test_list_image(self):\n pass", "async def test_get_image(opp, utcnow):\n helper = await setup_test_component(opp, create_camera)\n image = await camera.async_get_image(opp, helper.entity_id)\n assert image.content == base64.b64decode(FAKE_CAMERA_IMAGE)", "def test_signup_photo(self, mocked_sendy):\n url = '/0/chefs'\n data = {\n 'email': '[email protected]',\n 'password': 'secret',\n 'name': 'John',\n 'surname': 'Doe',\n 'language': 'es',\n 'photo': IMAGES['png'],\n }\n resp = self.client.post(url, data=data)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('auth', resp.data)\n self.assertIn('token', resp.data['auth'])\n # Check that the photo exists\n self.assertTrue(Chefs.objects.last().avatar_photos.all())", "def test_images_add(self):\n response = self.client.get('/images/photos/add')\n self.assertEqual(response.status_code, 200)", "def test_get_image_task(self):\n resp = self.app.get('/api/2/inf/esrs/image',\n 
headers={'X-Auth': self.token})\n\n task_id = resp.json['content']['task-id']\n expected = 'asdf-asdf-asdf'\n\n self.assertEqual(task_id, expected)", "def test_aws_service_api_private_image_get(self):\n pass", "def test_resource_user_resource_get_avatar_file_get(self):\n pass", "def test_uploading_image_to_recipe(self):\n url = image_upload_url(self.recipe.id)\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB',(10,10))\n img.save(ntf,format='JPEG')\n ntf.seek(0)\n res = self.client.post(url, {'image': ntf}, format='multipart')\n\n self.recipe.refresh_from_db()\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn('image', res.data)\n self.assertTrue(os.path.exists(self.recipe.image.path))", "def test_aws_service_api_public_images_get(self):\n pass", "def test_upload_image_to_recipe(self):\n url = image_upload_url(self.recipe.id)\n # we will create a temp file, write an image to it, then upload that file through the API endpoint\n with tempfile.NamedTemporaryFile(suffix = '.jpg') as ntf: # create a file in the system that we can write to (in a random location usually in /tempfolder). suffix is actually the extension\n img = Image.new('RGB', (10,10)) # create a black square image (10 pixels * 10 pixels) - we want a very small image -\n img.save(ntf, format = 'JPEG') # save the image to our NamedTemporaryFile\n ntf.seek(0) # since we save the file, the seeking (pointer) is at the end of file (if you try to access it it will appear blank), so here we set the pointer back to the beginning\n res = self.client.post(url, {'image': ntf}, format = 'multipart') # our serializer only takes image. format arg to tell django we wanna make a multipart form request (a form that consists of data instead of default JSON object)\n\n self.recipe.refresh_from_db()\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn('image', res.data)\n self.assertTrue(os.path.exists(self.recipe.image.path)) # check that the path exists for the image in our file system", "def test_list_image_metadata(self):\n pass", "def test_user_photo_creation_succeeds(self):\n\t\timage_name = 'test1.png'\n\t\twith open(self.img_url, 'rb') as image:\t\t\t\n\t\t\tdata = {'image': image, 'name':image_name}\t\t\t\n\t\t\tresponse = self.client.post(reverse('photos'), data)\n\t\t\timage.close()\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(response.data.get('name'), image_name)", "def test_basic_run(self):\n self.expect_datatore_lookup('SomeBlobKey', True)\n self.expect_open_image('SomeBlobKey', (1600, 1200))\n self.expect_resize(blob_image._DEFAULT_SERVING_SIZE)\n self.expect_encode_image('SomeImageInJpeg')\n self.run_request('image/jpeg', 'SomeImageInJpeg')", "def test_create_image(self):\n pass", "def test_upload_photo(self):\n\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.normal_token.key)\n\n url = reverse('crew-api:upload-photo', args=[self.normal_user.crew.uuid])\n\n photo_file = self.generate_photo_file()\n\n data = {\n 'photo':photo_file\n }\n\n response = self.client.post(url, data, format='multipart')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_users_photos_view_set_get_own_photos(self):\n # Create user and data\n user = account_models.User.objects.create_user(email='[email protected]', password='pass', username='aov_hov')\n category = photo_models.PhotoClassification.objects\\\n .create_or_update(name='Test', classification_type='category')\n\n photo1 = 
photo_models.Photo(image=Photo(open('apps/common/test/data/photos/photo1-min.jpg', 'rb')), user=user)\n photo1.save()\n photo1.category.set([category])\n photo1.save()\n\n photo2 = photo_models.Photo(image=Photo(open('apps/common/test/data/photos/photo2-min.jpg', 'rb')), user=user)\n photo2.save()\n photo2.category.set([category])\n photo2.save()\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/users/{}/photos'.format(user.id), format='json')\n result = request.data['results']\n\n self.assertEquals(request.status_code, 200)\n self.assertEquals(len(result), 2)\n self.assertEquals(result[0]['id'], photo2.id) # newest first\n self.assertIn('dimensions', result[0])\n self.assertIn('image_blurred', result[0])\n self.assertIn('image', result[0])", "def test_get_cover_url(self):\n blob = [\"image\"]\n result = self.connector.get_cover_url(blob)\n self.assertEqual(result, \"https://covers.openlibrary.org/b/id/image-L.jpg\")", "def test_aws_service_api_private_images_get(self):\n pass", "def test_upload_image_to_recipe(self):\n url = create_upload_image_url(self.recipe.id)\n\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n image = Image.new(\"RGB\", (10, 10))\n image.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(url, {'image': ntf}, format=\"multipart\")\n\n self.recipe.refresh_from_db()\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn('image', res.data)\n self.assertTrue(os.path.exists(self.recipe.image.path))", "def get_photo(self, photo_id):\n uri = 'photos/' + photo_id\n return self.make_request(uri)", "def test_images(self):\n\n message = {\"method\": \"images\", \"params\": {\"elem\": None}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"images\")\n self.assertIsInstance(response[\"result\"], list)\n\n images = [i[\"tag\"] for i in response[\"result\"]]\n\n self.assertIn(self.tag_image, images)" ]
[ "0.72576076", "0.7185627", "0.7070028", "0.6913519", "0.6779656", "0.67234474", "0.6634403", "0.66217315", "0.66099155", "0.6528186", "0.65029997", "0.6425517", "0.6420477", "0.64150953", "0.6414753", "0.63893515", "0.6307123", "0.62892", "0.6271793", "0.62598026", "0.6230758", "0.6206582", "0.620133", "0.6148049", "0.61459994", "0.6137513", "0.6130658", "0.612448", "0.60765797", "0.6061572" ]
0.820912
0
Test get drafts of a chef
def test_get_drafts(self):
    r1 = Recipes.objects.create(chef=self.user, name="Recipe 1", draft=True)
    r2 = Recipes.objects.create(chef=self.user, name="Recipe 2", draft=False)
    url = '/0/chefs/%i/drafts' % self.user.pk
    resp = self.client.get(url)
    self.assertPermissionDenied(resp)
    headers = self.login()
    resp = self.client.get(url, **headers)
    self.assertEqual(resp.status_code, 200)
    self.assertIn('drafts', resp.data)
    self.assertEqual(1, len(resp.data['drafts']))
    keys = ("liked", "public_url", "edit_date", "ingredients", "shared", "tags", "commented",
            "private", "id", "chef", "reported", "nb_shares", "added", "nb_added",
            "nb_comments", "draft", "commensals", "creation_date", "nb_likes", "name",
            "products", "prep_time", "serves", "bought", "book_for_sale", "description")
    self.assertEqual(set(keys), set(resp.data['drafts'][0].keys()))
    self.assertEqual(r1.pk, resp.data['drafts'][0]['id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_load_draft(league):\n draft = league.draft_results()\n assert(len(draft) == 144)\n #mcdavid 1st\n assert(draft[0]['player_key'] == '396.p.6743')\n # carter hart 67th\n assert(draft[66]['player_key'] == '396.p.7156')\n # zadorov last\n assert(draft[-1]['player_key'] == '396.p.5995')", "def test_with_draft_diff(self):\n repository = self.create_repository(tool_name='Test')\n review_request = self.create_review_request(\n repository=repository,\n submitter=self.user,\n publish=True)\n diffset = self.create_diffset(review_request, draft=True)\n filediff = self.create_filediff(diffset)\n\n rsp = self.api_get(\n get_original_file_url(review_request, diffset, filediff),\n expected_status=404)\n self.assertEqual(rsp['stat'], 'fail')\n self.assertEqual(rsp['err']['code'], DOES_NOT_EXIST.code)", "def test_get_recipes(self):\n r1 = Recipes.objects.create(chef=self.user, name=\"Recipe 1\", draft=True)\n r2 = Recipes.objects.create(chef=self.user, name=\"Recipe 2\", draft=False)\n r3 = Recipes.objects.create(chef=self.user, name=\"Recipe 3\", draft=False)\n book = Book.objects.create(chef=self.user, book_type=Book.TO_SELL)\n book.add_recipe(r3)\n\n url = '/0/chefs/%i/recipes' % self.user.pk\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('recipes', resp.data)\n self.assertEqual(1, len(resp.data['recipes']))\n keys = (\"liked\", \"public_url\", \"edit_date\", \"ingredients\", \"shared\", \"tags\", \"commented\",\n \"private\", \"id\", \"chef\", \"reported\", \"nb_shares\", \"added\", \"nb_added\",\n \"nb_comments\", \"draft\", \"commensals\", \"creation_date\", \"nb_likes\", \"name\",\n \"products\", \"prep_time\", \"serves\", \"bought\", \"book_for_sale\", \"description\")\n self.assertEqual(set(keys), set(resp.data['recipes'][0].keys()))\n self.assertEqual(r2.pk, resp.data['recipes'][0]['id'])", "def test_make_draft(self):\r\n # Make problem public.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'make_public'}\r\n )\r\n self.assertIsNotNone(self.get_item_from_modulestore(self.problem_usage_key, False))\r\n # Now make it draft, which means both versions will exist.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'create_draft'}\r\n )\r\n # Update the draft version and check that published is different.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'metadata': {'due': '2077-10-10T04:00Z'}}\r\n )\r\n published = self.get_item_from_modulestore(self.problem_usage_key, False)\r\n self.assertIsNone(published.due)\r\n draft = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertEqual(draft.due, datetime(2077, 10, 10, 4, 0, tzinfo=UTC))", "def test_create_draft_with_multiple_requests(self):\r\n # Make problem public.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'make_public'}\r\n )\r\n self.assertIsNotNone(self.get_item_from_modulestore(self.problem_usage_key, False))\r\n # Now make it draft, which means both versions will exist.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={\r\n 'publish': 'create_draft'\r\n }\r\n )\r\n self.assertIsNotNone(self.get_item_from_modulestore(self.problem_usage_key, False))\r\n draft_1 = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertIsNotNone(draft_1)\r\n\r\n # Now check that when a user sends request to create a draft when there is already a 
draft version then\r\n # user gets that already created draft instead of getting 'DuplicateItemError' exception.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={\r\n 'publish': 'create_draft'\r\n }\r\n )\r\n draft_2 = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertIsNotNone(draft_2)\r\n self.assertEqual(draft_1, draft_2)", "def test_home_view_with_a_draft_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n create_post(category=category, author=author, name='Draft Post', content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.', status='Draft')\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(reverse('blog.home'))\n self.assertContains(response, \"No posts are available.\")\n self.assertQuerysetEqual(response.context['posts'], [])", "def test_draft_list_does_not_show_deleted_aids(client, contributor):\n\n AidFactory(name='Is this the real life?', author=contributor,\n status='deleted')\n client.force_login(contributor)\n drafts_url = reverse('aid_draft_list_view')\n res = client.get(drafts_url)\n\n content = res.content.decode('utf-8')\n assert 'Is this the real life?' not in content", "def test_draft_pages():\n app = create_ctfd()\n with app.app_context():\n gen_page(app.db, title=\"Title\", route=\"this-is-a-route\", html=\"This is some HTML\", draft=True)\n\n with app.test_client() as client:\n r = client.get('/this-is-a-route')\n assert r.status_code == 404\n\n register_user(app)\n client = login_as_user(app)\n r = client.get('/this-is-a-route')\n assert r.status_code == 404\n destroy_ctfd(app)", "def edit_draft(self):\r\n EmptyPromise(\r\n lambda: self.q(css='.create-draft').present,\r\n 'Wait for edit draft link to be present'\r\n ).fulfill()\r\n\r\n self.q(css='.create-draft').first.click()\r\n\r\n EmptyPromise(\r\n lambda: self.q(css='.editing-draft-alert').present,\r\n 'Wait for draft mode to be activated'\r\n ).fulfill()", "def test_templates_person_detail_cms_draft_content(self):\n user = UserFactory(is_staff=True, is_superuser=True)\n self.client.login(username=user.username, password=\"password\")\n\n published_category = CategoryFactory(should_publish=True)\n not_published_category = CategoryFactory()\n\n published_organization = OrganizationFactory(should_publish=True)\n not_published_organization = OrganizationFactory()\n\n person = PersonFactory(\n page_title=\"My page title\",\n fill_portrait=True,\n fill_bio=True,\n fill_maincontent=True,\n fill_categories=[published_category, not_published_category],\n fill_organizations=[published_organization, not_published_organization],\n )\n\n # Modify the draft version of the published category\n title_obj = published_category.extended_object.title_set.get(language=\"en\")\n title_obj.title = \"modified category\"\n title_obj.save()\n\n # Modify the draft version of the published organization\n title_obj = published_category.extended_object.title_set.get(language=\"en\")\n title_obj.title = \"modified organization\"\n title_obj.save()\n page = person.extended_object\n\n # The page should be visible as draft to the superuser\n url = page.get_absolute_url()\n response = self.client.get(url)\n content = htmlmin.minify(\n response.content.decode(\"UTF-8\"),\n reduce_empty_attributes=False,\n remove_optional_attribute_quotes=False,\n )\n\n self.assertContains(\n response,\n \"<title>My page title - example.com</title>\",\n html=True,\n status_code=200,\n )\n title = 
person.extended_object.get_title()\n self.assertContains(\n response,\n f'<h1 class=\"subheader__title\">{title:s}</h1>',\n html=True,\n )\n\n # Main content should be present when not empty\n self.assertContains(response, \"person-detail__maincontent\")\n\n # The published category should be on the page in its published version\n self.assertContains(\n response,\n (\n # pylint: disable=consider-using-f-string\n '<a class=\"category-badge\" href=\"{:s}\">'\n '<span class=\"offscreen\">Category</span>'\n '<span class=\"category-badge__title\">{:s}</span></a>'\n ).format(\n published_category.public_extension.extended_object.get_absolute_url(),\n published_category.public_extension.extended_object.get_title(),\n ),\n html=True,\n )\n # The not published category should not be on the page\n self.assertContains(\n response,\n (\n # pylint: disable=consider-using-f-string\n '<a class=\"category-badge category-badge--draft\" href=\"{:s}\">'\n '<span class=\"offscreen\">Category</span>'\n '<span class=\"category-badge__title\">{:s}</span></a>'\n ).format(\n not_published_category.extended_object.get_absolute_url(),\n not_published_category.extended_object.get_title(),\n ),\n html=True,\n )\n\n # The published organization should be on the page in its published version\n self.assertIn(\n # pylint: disable=consider-using-f-string\n '<div class=\"organization-glimpse\" property=\"contributor\" '\n 'typeof=\"CollegeOrUniversity\"><a href=\"{:s}\" title=\"{:s}\">'.format(\n published_organization.extended_object.get_absolute_url(),\n published_organization.extended_object.get_title(),\n ),\n content,\n )\n self.assertContains(\n response,\n # pylint: disable=consider-using-f-string\n '<h2 class=\"organization-glimpse__title\" property=\"name\">{:s}</h2>'.format(\n published_organization.public_extension.extended_object.get_title()\n ),\n html=True,\n )\n # The not published organization should not be on the page\n self.assertIn(\n # pylint: disable=consider-using-f-string\n '<a href=\"{:s}\" title=\"{:s}\">'.format(\n not_published_organization.extended_object.get_absolute_url(),\n not_published_organization.extended_object.get_title(),\n ),\n content,\n )\n\n self.assertContains(\n response,\n # pylint: disable=consider-using-f-string\n '<h2 class=\"organization-glimpse__title\" property=\"name\">{:s}</h2>'.format(\n not_published_organization.extended_object.get_title()\n ),\n html=True,\n )\n\n self.assertNotContains(response, \"modified\")", "def test_create_draft_with_update(self):\r\n # Make problem public.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'make_public'}\r\n )\r\n self.assertIsNotNone(self.get_item_from_modulestore(self.problem_usage_key, False))\r\n # Now make it draft, which means both versions will exist.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={\r\n 'metadata': {'due': '2077-10-10T04:00Z'},\r\n 'publish': 'create_draft'\r\n }\r\n )\r\n published = self.get_item_from_modulestore(self.problem_usage_key, False)\r\n self.assertIsNone(published.due)\r\n draft = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertEqual(draft.due, datetime(2077, 10, 10, 4, 0, tzinfo=UTC))", "def test_get_list_published_user_drafts(self):\n story1 = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published',\n language=\"en\", author=self.user)\n story2 = create_story(title=\"Test Story 2\", summary=\"Test Summary 2\",\n byline=\"Test Byline 2\", status='draft',\n 
language=\"en\", author=self.user)\n self.api_client.client.login(username=self.username, password=self.password)\n uri = '/api/0.1/stories/'\n resp = self.api_client.get(uri)\n self.assertValidJSONResponse(resp)\n self.assertEqual(len(self.deserialize(resp)['objects']), 2)\n story_ids = [story['story_id'] for story in self.deserialize(resp)['objects']]\n self.assertIn(story1.story_id, story_ids)\n self.assertIn(story2.story_id, story_ids)", "def test_01_check_to_state_draft_post(self):\r\n cr, uid = self.cr, self.uid\r\n filter_draft = self.create_filter_draft(cr, uid)\r\n self.create_rule(cr, uid, 'on_create')\r\n new_lead_id = self.create_lead_test_1(cr, uid)\r\n new_lead = self.model.browse(cr, uid, new_lead_id)\r\n self.assertEquals(new_lead.state, 'draft')\r\n self.assertEquals(new_lead.user_id.id, self.demo)\r\n self.delete_rules(cr, uid)", "def test_02_check_from_draft_to_done_with_steps(self):\r\n cr, uid = self.cr, self.uid\r\n filter_draft = self.create_filter_draft(cr, uid)\r\n filter_done = self.create_filter_done(cr, uid)\r\n self.create_rule(cr, uid, 'on_write', filter_pre_id=filter_draft, filter_id=filter_done)\r\n new_lead_id = self.create_lead_test_1(cr, uid)\r\n new_lead = self.model.browse(cr, uid, new_lead_id)\r\n self.assertEquals(new_lead.state, 'draft')\r\n self.assertEquals(new_lead.user_id.id, self.admin)\r\n \"\"\" change the state of new_lead to open and check that responsible doen't change\"\"\"\r\n new_lead.write({'state': 'open'})\r\n new_lead = self.model.browse(cr, uid, new_lead_id)\r\n self.assertEquals(new_lead.state, 'open')\r\n self.assertEquals(new_lead.user_id.id, self.admin)\r\n \"\"\" change the state of new_lead to pending and check that responsible doen't change\"\"\"\r\n new_lead.write({'state': 'pending'})\r\n new_lead = self.model.browse(cr, uid, new_lead_id)\r\n self.assertEquals(new_lead.state, 'pending')\r\n self.assertEquals(new_lead.user_id.id, self.admin)\r\n \"\"\" change the state of new_lead to cancel and check that responsible doen't change\"\"\"\r\n new_lead.write({'state': 'cancel'})\r\n new_lead = self.model.browse(cr, uid, new_lead_id)\r\n self.assertEquals(new_lead.state, 'cancel')\r\n self.assertEquals(new_lead.user_id.id, self.admin)\r\n \"\"\" change the state of new_lead to done and check that responsible doen't change \"\"\"\r\n new_lead.write({'state': 'done'})\r\n new_lead = self.model.browse(cr, uid, new_lead_id)\r\n self.assertEquals(new_lead.state, 'done')\r\n self.assertEquals(new_lead.user_id.id, self.admin)\r\n self.delete_rules(cr, uid)", "def test_draft_unit_page_html(self):\r\n draft_unit = modulestore('draft').convert_to_draft(self.vertical.location)\r\n html = self.get_page_html(draft_unit)\r\n self.validate_html_for_add_buttons(html)", "def test_update_draft():\n with open(basedir + \"fixture/7149593_formatted.json\", \"r\") as f:\n data = f.read()\n storage.save_draft(user_id, \"bib\", \"7149593\", data, \"1362044230872\")\n json_data = json.loads(data)\n json_data['@context'] = \"yadda\"\n storage.update_draft(user_id, \"bib\", \"7149593\", json.dumps(json_data), \"1362044230872\")\n assert json.loads(open(basedir + \"some/path/\" + user_id + \"/bib/7149593\", \"r\").read())['document']['@context'] == \"yadda\"", "def drafts(self):\n if self._drafts is None:\n if self._initialize_drafts():\n self._save_drafts()\n return self._drafts", "def test_create_with_new_draft(self):\n user1 = User.objects.create(username='reviewer1')\n user2 = User.objects.create(username='reviewer2')\n\n group1 = 
self.create_review_group(name='group1')\n group2 = self.create_review_group(name='group2')\n\n dep_review_request_1 = self.create_review_request(publish=True)\n dep_review_request_2 = self.create_review_request(publish=True)\n\n review_request = self.create_review_request(\n publish=True,\n bugs_closed='1,20,300',\n commit_id='abc123',\n description_rich_text=True,\n depends_on=[dep_review_request_1, dep_review_request_2],\n rich_text=True,\n target_groups=[group1, group2],\n target_people=[user1, user2],\n testing_done_rich_text=True,\n extra_data={\n 'key': {\n 'values': [1, 2, 3],\n },\n 'mybool': True,\n })\n\n active_file_attachment_1 = self.create_file_attachment(review_request)\n active_file_attachment_2 = self.create_file_attachment(review_request)\n inactive_file_attachment = self.create_file_attachment(review_request,\n active=False)\n\n active_screenshot_1 = self.create_screenshot(review_request)\n active_screenshot_2 = self.create_screenshot(review_request)\n inactive_screenshot = self.create_screenshot(review_request,\n active=False)\n\n # Create the draft.\n draft = ReviewRequestDraft.create(review_request)\n\n # Make sure all the fields are the same.\n self.assertEqual(draft.branch, review_request.branch)\n self.assertEqual(draft.bugs_closed, review_request.bugs_closed)\n self.assertEqual(draft.commit_id, review_request.commit_id)\n self.assertEqual(draft.description, review_request.description)\n self.assertEqual(draft.description_rich_text,\n review_request.description_rich_text)\n self.assertEqual(draft.extra_data, review_request.extra_data)\n self.assertEqual(draft.rich_text, review_request.rich_text)\n self.assertEqual(draft.summary, review_request.summary)\n self.assertEqual(draft.testing_done, review_request.testing_done)\n self.assertEqual(draft.testing_done_rich_text,\n review_request.testing_done_rich_text)\n\n self.assertEqual(list(draft.depends_on.order_by('pk')),\n [dep_review_request_1, dep_review_request_2])\n self.assertEqual(list(draft.target_groups.all()),\n [group1, group2])\n self.assertEqual(list(draft.target_people.all()),\n [user1, user2])\n self.assertEqual(list(draft.file_attachments.all()),\n [active_file_attachment_1, active_file_attachment_2])\n self.assertEqual(list(draft.inactive_file_attachments.all()),\n [inactive_file_attachment])\n self.assertEqual(list(draft.screenshots.all()),\n [active_screenshot_1, active_screenshot_2])\n self.assertEqual(list(draft.inactive_screenshots.all()),\n [inactive_screenshot])\n\n self.assertIsNotNone(draft.changedesc)", "def test_show_post_view_with_a_draft_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n draft_post = create_post(category=category, author=author, name='Draft Post', content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
Praesent sollicitudin.', status='Draft')\n url = reverse('blog.post', args=(draft_post.id,))\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)", "def test_published_and_draft_contents_with_update(self):\r\n\r\n # Make problem public.\r\n resp = self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'make_public'}\r\n )\r\n self.assertIsNotNone(self.get_item_from_modulestore(self.problem_usage_key, False))\r\n\r\n # Now make a draft\r\n resp = self.client.ajax_post(\r\n self.problem_update_url,\r\n data={\r\n 'id': unicode(self.problem_usage_key),\r\n 'metadata': {},\r\n 'data': \"<p>Problem content draft.</p>\",\r\n 'publish': 'create_draft'\r\n }\r\n )\r\n\r\n # Both published and draft content should be different\r\n published = self.get_item_from_modulestore(self.problem_usage_key, False)\r\n draft = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertNotEqual(draft.data, published.data)\r\n\r\n # Get problem by 'xblock_handler'\r\n view_url = reverse_usage_url(\"xblock_view_handler\", self.problem_usage_key, {\"view_name\": \"student_view\"})\r\n resp = self.client.get(view_url, HTTP_ACCEPT='application/json')\r\n self.assertEqual(resp.status_code, 200)\r\n\r\n # Activate the editing view\r\n view_url = reverse_usage_url(\"xblock_view_handler\", self.problem_usage_key, {\"view_name\": \"studio_view\"})\r\n resp = self.client.get(view_url, HTTP_ACCEPT='application/json')\r\n self.assertEqual(resp.status_code, 200)\r\n\r\n # Both published and draft content should still be different\r\n published = self.get_item_from_modulestore(self.problem_usage_key, False)\r\n draft = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertNotEqual(draft.data, published.data)", "def test_draft_path_layer_cache(self):\n cache = caches[settings.MAPENTITY_CONFIG['GEOJSON_LAYERS_CACHE_BACKEND']]\n\n obj = self.modelfactory(draft=False)\n self.modelfactory(draft=True)\n\n # There are 7 queries to get layer without drafts\n with self.assertNumQueries(5):\n response = self.client.get(obj.get_layer_url(), {\"_no_draft\": \"true\"})\n self.assertEqual(len(response.json()['features']), 1)\n\n # We check the content was created and cached with no_draft key\n # We check that any cached content can be found with no_draft (we still didn't ask for it)\n last_update = Path.no_draft_latest_updated()\n last_update_draft = Path.latest_updated()\n geojson_lookup = 'en_path_%s_nodraft_json_layer' % last_update.strftime('%y%m%d%H%M%S%f')\n geojson_lookup_last_update_draft = 'en_path_%s_json_layer' % last_update_draft.strftime('%y%m%d%H%M%S%f')\n content = cache.get(geojson_lookup)\n content_draft = cache.get(geojson_lookup_last_update_draft)\n\n self.assertEqual(response.content, content.content)\n self.assertIsNone(content_draft)\n\n # We have 1 less query because the generation of paths was cached\n with self.assertNumQueries(3):\n self.client.get(obj.get_layer_url(), {\"_no_draft\": \"true\"})\n\n self.modelfactory(draft=True)\n\n # Cache was not updated, the path was a draft\n with self.assertNumQueries(3):\n self.client.get(obj.get_layer_url(), {\"_no_draft\": \"true\"})\n\n self.modelfactory(draft=False)\n\n # Cache was updated, the path was not a draft : we get 7 queries\n with self.assertNumQueries(5):\n self.client.get(obj.get_layer_url(), {\"_no_draft\": \"true\"})", "def test_get_revision(self):\n pass", "def drafts():\n query = 
Entry.drafts().order_by(Entry.last_mod_date.desc())\n return object_list('index.html', query)", "def action_draft(self):\n options=self.env['plm.config.settings'].GetOptions()\n status = 'draft'\n action = 'draft'\n default = {\n 'state': status,\n 'engineering_writable': True,\n }\n doc_default = {\n 'state': status,\n 'writable': True,\n }\n operationParams = {\n 'status': status,\n 'statusName': _('Draft'),\n 'action': action,\n 'docaction': 'draft',\n 'excludeStatuses': ['draft', 'released', 'undermodify', 'obsoleted'],\n 'includeStatuses': ['confirmed', 'uploaded', 'transmitted'],\n 'default': default,\n 'doc_default': doc_default,\n }\n if options.get('opt_showWFanalysis', False):\n return self.action_check_workflow(operationParams)\n else:\n ids=self._ids\n self.logging_workflow(ids, action, status)\n return self._action_to_perform(ids, operationParams, default)", "def get_drafts(self):\n return self.filter(status=\"D\")", "def test_save_draft():\n with open(basedir + \"fixture/7149593_formatted.json\", \"r\") as f:\n storage.save_draft(user_id, \"bib\", \"7149593\", f.read(), \"1362044230872\")\n with open(basedir + \"some/path/\" + user_id + \"/bib/7149593\", \"r\") as f:\n json_data = json.loads(f.read())\n assert json_data['document']['@id'].rsplit(\"/\",1)[1] == '7149593'\n assert json_data['etag'] == \"1362044230872\"", "def test_publish_draft_delete(self):\r\n location = self.old_course_key.make_usage_key('vertical', name='Vert1')\r\n item = self.draft_mongo.get_item(location, 2)\r\n self._xmodule_recurse(\r\n item,\r\n lambda i: self.draft_mongo.publish(i.location, self.userid)\r\n )\r\n # verify status\r\n item = self.draft_mongo.get_item(location, 0)\r\n self.assertFalse(getattr(item, 'is_draft', False), \"Item was published. Draft should not exist\")\r\n # however, children are still draft, but I'm not sure that's by design\r\n\r\n # convert back to draft\r\n self.draft_mongo.convert_to_draft(location)\r\n # both draft and published should exist\r\n draft_vert = self.draft_mongo.get_item(location, 0)\r\n self.assertTrue(getattr(draft_vert, 'is_draft', False), \"Item was converted to draft but doesn't say so\")\r\n item = self.old_mongo.get_item(location, 0)\r\n self.assertFalse(getattr(item, 'is_draft', False), \"Published item doesn't say so\")\r\n\r\n # delete the discussion (which oddly is not in draft mode)\r\n location = self.old_course_key.make_usage_key('discussion', name='Discussion1')\r\n self.draft_mongo.delete_item(location)\r\n # remove pointer from draft vertical (verify presence first to ensure process is valid)\r\n self.assertIn(location, draft_vert.children)\r\n draft_vert.children.remove(location)\r\n # move the other child\r\n other_child_loc = self.old_course_key.make_usage_key('html', name='Html2')\r\n draft_vert.children.remove(other_child_loc)\r\n other_vert = self.draft_mongo.get_item(self.old_course_key.make_usage_key('vertical', name='Vert2'), 0)\r\n other_vert.children.append(other_child_loc)\r\n self.draft_mongo.update_item(draft_vert, self.userid)\r\n self.draft_mongo.update_item(other_vert, self.userid)\r\n # publish\r\n self._xmodule_recurse(\r\n draft_vert,\r\n lambda i: self.draft_mongo.publish(i.location, self.userid)\r\n )\r\n item = self.old_mongo.get_item(draft_vert.location, 0)\r\n self.assertNotIn(location, item.children)\r\n with self.assertRaises(ItemNotFoundError):\r\n self.draft_mongo.get_item(location)\r\n self.assertNotIn(other_child_loc, item.children)\r\n self.assertTrue(self.draft_mongo.has_item(other_child_loc), \"Oops, lost 
moved item\")", "def test_draft_container_preview_html(self):\r\n draft_unit = modulestore('draft').convert_to_draft(self.vertical.location)\r\n draft_child_container = modulestore('draft').convert_to_draft(self.child_container.location)\r\n draft_child_vertical = modulestore('draft').convert_to_draft(self.child_vertical.location)\r\n self.validate_preview_html(draft_unit, self.container_view,\r\n can_edit=True, can_reorder=True, can_add=True)\r\n self.validate_preview_html(draft_child_container, self.container_view,\r\n can_edit=True, can_reorder=True, can_add=True)\r\n self.validate_preview_html(draft_child_vertical, self.reorderable_child_view,\r\n can_edit=True, can_reorder=True, can_add=True)", "def test_get_recipe_information(self):\n pass", "def test_get_self(self):\n url = '/0/chefs'\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('chef', resp.data)\n self.assertEqual(self.CHEF_VIEW_KEYS - set(['followed']), set(resp.data['chef'].keys()))" ]
[ "0.6352454", "0.6243344", "0.60634345", "0.59144443", "0.58387285", "0.5779342", "0.56427056", "0.56400126", "0.56305224", "0.55719894", "0.55666673", "0.5556045", "0.5539719", "0.5524101", "0.552297", "0.5499391", "0.5492469", "0.5491132", "0.54893005", "0.54730755", "0.5471528", "0.54514354", "0.54232156", "0.54200125", "0.5413369", "0.5378644", "0.5366665", "0.53186685", "0.5301473", "0.5300218" ]
0.7398199
0
Test get books of a chef
def test_get_books(self): book1 = Book.objects.create(chef=self.user) book2 = Book.objects.create(chef=self.user, book_type=Book.TO_SELL) url = '/0/chefs/%i/books' % self.user.pk resp = self.client.get(url) self.assertPermissionDenied(resp) headers = self.login() resp = self.client.get(url, **headers) self.assertEqual(resp.status_code, 200) self.assertIn('books', resp.data) self.assertEqual(1, len(resp.data['books'])) keys = ('added', 'name', 'edit_date', 'chef', 'creation_date', 'id', 'nb_likes', 'nb_recipes', 'book_type', 'status', 'price', 'product_id') self.assertEqual(set(keys), set(resp.data['books'][0].keys()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_books_method(self):\n result = self.book.get_books()\n self.assertTrue(result)", "def test_read_book(self):\n\n delete_books()\n\n book = create_book(\"title one\")[\"book\"]\n\n with test_client.get(\"/book/{}/\".format(book[\"id\"])) as response:\n\n self.assertEqual(\n json.loads(response.get_data(as_text=True)),\n {\n \"status\": \"success\",\n \"book\": book\n }\n )\n\n \"\"\"\n clear the table, create several books and read them\n \"\"\"\n\n delete_books()\n\n book_one = create_book(\"title one\")[\"book\"]\n book_two = create_book(\"title two\")[\"book\"]\n\n with test_client.get(\"/book/{}/\".format(book_one[\"id\"])) as response:\n\n self.assertEqual(\n json.loads(response.get_data(as_text=True)),\n {\n \"status\": \"success\",\n \"book\": book_one\n }\n )\n\n with test_client.get(\"/book/{}/\".format(book_two[\"id\"])) as response:\n\n self.assertEqual(\n json.loads(response.get_data(as_text=True)),\n {\n \"status\": \"success\",\n \"book\": book_two\n }\n )", "def test_get_all_books(self):\n\n\t\t# create book\n\t\tbook_1 = {\n\t\t\t'title': 'Hello Books',\n\t\t\t'isbn': '5698745124'\n\t\t}\n\t\tbook_2 = {\n\t\t\t'title': 'Hello Books 2',\n\t\t\t'isbn': '8765456766'\n\t\t}\n\t\tlogin_data = self.login_test_user()\n\t\ttoken = login_data['auth_token']\n\t\tpost_book_1 = self.client.post(\n\t\t\tf'{URL_BOOKS}',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(book_1)\n\t\t)\n\n\t\tpost_book_2 = self.client.post(\n\t\t\tf'{URL_BOOKS}',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(book_2)\n\t\t)\n\n\t\tres = self.client.get(\n\t\t\tf'{URL_BOOKS}',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'))\n\n\t\tres_data = json.loads(res.data.decode())\n\t\tself.assertEqual(len(res_data.get('books')), 2)", "def test_get_specific_book_method(self):\n # When book id is int\n book_id = 1\n result = self.book.get_book(book_id)\n self.assertEqual(result, [{\"Title\": \"Harry Potter and Chamber of Secrets\",\n \"Author\": \"J.K Rowling\",\n \"Copies\": 2}])", "def test_get_book(self):\n\n response = self.client.post(\n '/api/v1/books', data=json.dumps(add_book), content_type='application/json',\n headers=self.get_admin_token())\n response1 = self.client.get(\n '/api/v1/books/NJCF4001', content_type='application/json', headers=self.get_admin_token())\n result = json.loads(response1.data.decode())\n self.assertEqual(result['message'],\n 'Retrieved successfully')\n assert response1.status_code == 200", "def test_get_books(self):\n response = self.client.post(\n '/api/v1/books', data=json.dumps(add_book), content_type='application/json',\n headers=self.get_admin_token())\n response1 = self.client.get(\n '/api/v1/books', content_type='application/json', headers=self.get_token())\n result = json.loads(response1.data.decode())\n self.assertEqual(result['message'],\n \"Retrieved successfully\")\n assert response1.status_code == 200", "def test_search_book_id(self):\n response = self.client.open(\n '/juanbedoya29/ApiRest/1.0.0/books/2',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_get_recipes(self):\n r1 = Recipes.objects.create(chef=self.user, name=\"Recipe 1\", draft=True)\n r2 = Recipes.objects.create(chef=self.user, name=\"Recipe 2\", draft=False)\n r3 = Recipes.objects.create(chef=self.user, name=\"Recipe 3\", draft=False)\n book = Book.objects.create(chef=self.user, 
book_type=Book.TO_SELL)\n book.add_recipe(r3)\n\n url = '/0/chefs/%i/recipes' % self.user.pk\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('recipes', resp.data)\n self.assertEqual(1, len(resp.data['recipes']))\n keys = (\"liked\", \"public_url\", \"edit_date\", \"ingredients\", \"shared\", \"tags\", \"commented\",\n \"private\", \"id\", \"chef\", \"reported\", \"nb_shares\", \"added\", \"nb_added\",\n \"nb_comments\", \"draft\", \"commensals\", \"creation_date\", \"nb_likes\", \"name\",\n \"products\", \"prep_time\", \"serves\", \"bought\", \"book_for_sale\", \"description\")\n self.assertEqual(set(keys), set(resp.data['recipes'][0].keys()))\n self.assertEqual(r2.pk, resp.data['recipes'][0]['id'])", "def test_get_recipe_information(self):\n pass", "def test_get_random_recipes(self):\n pass", "def test_search_recipes(self):\n pass", "def test_retrieve_recipes(self):\n sample_recipe(name=\"Avocado toast\")\n sample_recipe(name='Baklava')\n\n res = self.client.get(RECIPES_URL)\n\n recipes = Recipe.objects.all()\n serializer = RecipeSerializer(recipes, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 2)\n self.assertEqual(res.data, serializer.data)", "def test_books_list(self):\n\n delete_books()\n book_one = create_book(\"title one\")[\"book\"]\n\n with test_client.get(\"/book/list/\") as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"books\": [\n book_one\n ]\n }\n )\n\n \"\"\" \n clear the table and create several books\n \"\"\"\n\n delete_books()\n\n book_one = create_book(\"title one\")[\"book\"]\n book_two = create_book(\"title two\")[\"book\"]\n\n with test_client.get(\"/book/list/\") as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"books\": [\n book_one,\n book_two\n ]\n }\n )", "def test_get_book(session, client, book1_dict, book2_dict, book3_dict, expected_book3_fulldict):\n get_response = client.get(\"/books/1\")\n assert 404 == get_response.status_code\n\n \"\"\"Add data to db\"\"\"\n json_data = json.dumps(book1_dict)\n post_response = client.post(\"/books\", data=json_data, headers={\"Content-Type\": \"application/json\"})\n assert 201 == post_response.status_code\n\n json_data = json.dumps(book2_dict)\n post_response = client.post(\"/books\", data=json_data, headers={\"Content-Type\": \"application/json\"})\n assert 201 == post_response.status_code\n\n json_data = json.dumps(book3_dict)\n post_response = client.post(\"/books\", data=json_data, headers={\"Content-Type\": \"application/json\"})\n assert 201 == post_response.status_code\n\n \"\"\"get on non-empy resource\"\"\"\n get_response = client.get(\"/books/3\")\n assert 200 == get_response.status_code\n payload = get_response.get_json()\n assert expected_book3_fulldict == payload\n\n \"\"\"get with invalid input\"\"\"\n get_response = client.get(\"/books/L\")\n assert 400 == get_response.status_code\n\n \"\"\"get with non-existant resource\"\"\"\n get_response = client.get(\"books/7\")\n assert 404 == get_response.status_code\n\n \"\"\"get with out of range\"\"\"\n get_response = client.get(\"books/-1\")\n assert 400 == get_response.status_code", "def test_search_by_ISBN(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", 
\"0123456789123\")\n self.assertEqual(s1.search_by_ISBN(\"0123456789123\"), 0)\n s1.add_resource(b1)\n self.assertEqual(s1.search_by_ISBN(\"0123456789123\"), 1)", "def test_load_book_details(self, mock_get):\n\n c = Client()\n data = {\n 'search_type': self.filter_subject,\n 'search_value': self.subject,\n }\n response = c.get('/taric_books/%s/' % self.ISBN)\n\n self.assertEqual(response.status_code, 200)", "def test_get_all_books(self):\n create_admin()\n response = self.client().post('/api/v1/login', json=self.test_admin)\n json_data = json.loads(response.data)\n access_token = json_data.get('access_token')\n self.client().post('/api/v1/products',\n headers={\"Authorization\": \"Bearer \" + access_token}, json=self.test_book)\n response = self.client().get('/api/v1/products')\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Books'))\n self.assertEqual(response.status_code, 200)", "def test_retrieve_recipes(self):\n sample_recipe(user=self.user)\n sample_recipe(user=self.user, title=\"Beans\")\n\n res = self.client.get(RECIPE_URL)\n\n recipes = Recipe.objects.all().order_by('-id')\n serializer = RecipeSerializer(recipes, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 2)\n self.assertEqual(res.data, serializer.data)", "def test_book_list(self):\n response = self.client.get(self.url_list, format=\"application/json\")\n self.assertTrue(response.status_code == 200)\n self.assertEqual(\n json.loads(response.content)[0][\"name\"], self.book.name)", "def test_get_recipe_information_bulk(self):\n pass", "def test_get_books(session, client, book1_dict, book2_dict, book3_dict, expect_book1_dict, expect_book2_dict,\n expect_book3_dict):\n expected_payload = []\n\n \"\"\"\n Test case for resource that contains no records\n \"\"\"\n response = client.get('/books')\n assert response.status_code == 200\n response = json.loads(response.data.decode('utf8'))\n assert expected_payload == response\n\n\n \"\"\"\n Test case for resource that contains records.\n \"\"\"\n \"\"\"Add book to database\"\"\"\n json_data = json.dumps(book1_dict)\n post_response = client.post(\"/books\", data=json_data, headers={\"Content-Type\": \"application/json\"})\n assert 201 == post_response.status_code\n\n get_response = client.get('/books')\n assert get_response.status_code == 200\n expected_payload.append(expect_book1_dict)\n payload = get_response.get_json()\n assert expected_payload == payload\n\n \"\"\"\n adding Second book\n \"\"\"\n json_data = json.dumps(book2_dict)\n post_response = client.post(\"/books\", data=json_data, headers={\"Content-Type\": \"application/json\"})\n assert 201 == post_response.status_code\n\n get_response = client.get('/books')\n assert get_response.status_code == 200\n expected_payload.append(expect_book2_dict)\n payload = get_response.get_json()\n assert expected_payload == payload", "def test_retrieve_recipes(self):\n sample_quizz()\n sample_quizz()\n\n res = self.client.get(QUIZZES_URL)\n\n quizzes = Quizz.objects.all()\n serializer = QuizzSerializer(quizzes, many=True)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)", "def test_retrieving_recipes(self):\n sample_recipe(user=self.user)\n sample_recipe(user=self.user)\n\n res = self.client.get(RECIPES_URL)\n\n recipes = Recipe.objects.all().order_by('-id')\n serializer = RecipeSerializer(recipes, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, 
serializer.data)", "def test_get_food(self):\n pass", "def test_book_search_pagination(self):\n c = Client()\n # TODO implement a test\n pass", "def test_available_book():\n rep = RentRepository()\n rep.store( '23','12', '1', '1')\n try:\n\n idBook = '12'\n idCustomer = '22'\n flag = '1'\n id = '1'\n Validator.available_book(rep.get_all(), idBook)\n\n assert False\n\n except RepositoryExceptionRent as msg:\n assert True", "def test_books(self):\r\n link_re = re.compile(r'<(?P<link>[^>]+)>\\; rel=\\\"(?P<rel>[^\\\"]+)\\\"')\r\n\r\n response = self.get_resource('author-test-book',\r\n data=dict(author=self.author.pk))\r\n self.assertTrue(response.has_header(\"Link\"))\r\n self.assertEquals(\r\n response[\r\n \"Link\"], '<%s?page=2&author=5>; rel=\"next\"' % self.reverse('author-test-book')) # nolint\r\n # Get objects by links on Link header\r\n response = self.client.get(link_re.findall(response['Link'])[0][0])\r\n\r\n links = link_re.findall(response['Link'])\r\n\r\n self.assertEquals(links[0][0], '%s?page=3&author=5' %\r\n self.reverse('author-test-book'))\r\n self.assertEquals(links[0][1], 'next')\r\n\r\n self.assertEquals(\r\n links[1][0], '%s?author=5' % self.reverse('author-test-book'))\r\n self.assertEquals(links[1][1], 'previous')\r\n\r\n response = self.get_resource(\r\n 'author-test-book', data={\r\n 'author': self.author.pk, 'adr-max': 0\r\n })\r\n self.assertFalse(response.has_header(\"Link\"))\r\n\r\n response = self.get_resource(\r\n 'author-test-book',\r\n data={\r\n 'author': self.author.pk, 'adr-max': 'all'\r\n })\r\n self.assertEquals(response.status_code, 200)\r\n self.assertFalse(response.has_header(\"Link\"))", "def test_retrieve_recipes(self):\n sample_recipe(user = self.user)\n sample_recipe(user = self.user)\n\n res = self.client.get(RECIPE_URL)\n\n recipes = Recipe.objects.all().order_by('-id')\n serializer = RecipeSerializer(recipes, many=True) # many=true returns the data as a list\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)", "def test_search_client_by_isbn(self, mock_get):\n\n response = isbn_utils.search_by(self.filter_isbn, self.ISBN)\n self.assertEqual(response.data, json.loads(open(UNIT_TEST_RESOURCES_FOLDER +\n FILE_NAME_ISBN_SEARCH_RESPONSE).read())[\"data\"])", "def book(self):\n self.client.get(f\"{host}/book/{COMPETITION}/{CLUB}\")" ]
[ "0.7180482", "0.7024727", "0.69065464", "0.6828925", "0.676341", "0.6632599", "0.652474", "0.65187514", "0.6503426", "0.65010756", "0.6440636", "0.64365", "0.6412048", "0.6401122", "0.6395345", "0.6385182", "0.6355214", "0.63172936", "0.628445", "0.62715685", "0.62551796", "0.6231025", "0.6203542", "0.617327", "0.61402947", "0.6136682", "0.6084847", "0.60773206", "0.6075688", "0.60756195" ]
0.71184486
1
Test get photos of a chef
def test_get_photos(self): recipe = Recipes.objects.create(chef=self.user, draft=False, private=False) photo = Photos.objects.create(recipe=recipe, photo_order=1) url = '/0/chefs/%i/photos' % self.user.pk resp = self.client.get(url) self.assertPermissionDenied(resp) headers = self.login() resp = self.client.get(url, **headers) self.assertEqual(resp.status_code, 200) self.assertIn('photos', resp.data) self.assertEqual(1, len(resp.data['photos'])) keys = ('edit_date', 'creation_date', 'id', u'temperature', 'url', 'recipe', 'cover', 'time', 'instructions', 'order', 'quantity') self.assertEqual(set(keys), set(resp.data['photos'][0].keys()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_chef_with_photo(self):\n url = '/0/chefs/' + str(self.user.pk)\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('chef', resp.data)\n self.assertNotIn('photo', resp.data['chef'])\n\n self.user.avatar_photos.create(s3_url='image') # Create photo\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('chef', resp.data)\n self.assertIn('photo', resp.data['chef'])\n keys = set(('id', 'url', 'creation_date', 'edit_date'))\n self.assertEqual(keys, set(resp.data['chef']['photo']))", "def test_list_image(self):\n pass", "def test_retrieval_of_user_photos(self):\t\n\t\tget_response = self.client.get(reverse('photos'))\n\n\t\tself.assertEqual(get_response.status_code, status.HTTP_200_OK)\n\t\tdata = [i.values() for i in get_response.data]\n\t\tself.assertIn(u'{}'.format(self.image_name), data[0])", "def test_image_display(self):\n\n result = self.client.get(\"/select_image\")\n\n self.assertIn(b\"/static/uploads/girl-glowing-skin-blue-eyes.jpg\", result.data)", "def test_aws_service_api_image_get(self):\n pass", "def test_images(self):\n\n message = {\"method\": \"images\", \"params\": {\"elem\": None}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"images\")\n self.assertIsInstance(response[\"result\"], list)\n\n images = [i[\"tag\"] for i in response[\"result\"]]\n\n self.assertIn(self.tag_image, images)", "def test_photos_seq(pre_pop_transaction, rover_name, sol, camera):\n from mars_street_view.models import Photo\n data = Photo.get_rov_sol(rover_name, sol)\n photos_by_cam = data.get('photos_by_cam', {})\n photos = photos_by_cam.get(camera, [])\n urls_from_method = [photo.img_src for photo in photos]\n prev_url = ''\n for url in urls_from_method:\n assert url > prev_url\n prev_url = url", "def testimage_handler(self):\n\t\t\n\t\tthings = Thing.objects.all()\n\t\tif len( things ):\n\t\t\tthing = things[0]\n\t\telse:\n\t\t\tc = Client()\n\t\t\tdata = parse_qs( 'title=&tags=&lattitude=32.82248&longitude=-96.762986&duration=&parent=&privacy=U&lifespan=&format=txt' )\n\t\t\tdata[ 'media' ] = open( MEDIA_ROOT + 'unittest_image.jpg' )\n\t\t\tc.post( '/api/place/', data )\n\t\t\t\n\t\t\tthing = Thing.objects.all()[0]\n\n\t\t\n\t\turi = thing.media.replace( 'http://' + DOMAIN, '' )\n\t\t\n\t\tc = Client()\n\t\tresponse = c.get( uri )\n\t\tself.failUnlessEqual(response.status_code, 200)", "def test_read_image(self):\n pass", "def test_aws_service_api_public_images_get(self):\n pass", "def test_get_ao_image(self):\n response = self.client.open(\n '/rui-support/ao-image',\n method='GET',\n content_type='application/ld+json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_get_photos_paging(self):\n pass", "def test_list_image_metadata(self):\n pass", "def test_images_add(self):\n response = self.client.get('/images/photos/add')\n self.assertEqual(response.status_code, 200)", "def test_users_photos_view_set_get_own_photos(self):\n # Create user and data\n user = account_models.User.objects.create_user(email='[email protected]', password='pass', username='aov_hov')\n category = photo_models.PhotoClassification.objects\\\n .create_or_update(name='Test', classification_type='category')\n\n photo1 = photo_models.Photo(image=Photo(open('apps/common/test/data/photos/photo1-min.jpg', 'rb')), user=user)\n photo1.save()\n photo1.category.set([category])\n 
photo1.save()\n\n photo2 = photo_models.Photo(image=Photo(open('apps/common/test/data/photos/photo2-min.jpg', 'rb')), user=user)\n photo2.save()\n photo2.category.set([category])\n photo2.save()\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/users/{}/photos'.format(user.id), format='json')\n result = request.data['results']\n\n self.assertEquals(request.status_code, 200)\n self.assertEquals(len(result), 2)\n self.assertEquals(result[0]['id'], photo2.id) # newest first\n self.assertIn('dimensions', result[0])\n self.assertIn('image_blurred', result[0])\n self.assertIn('image', result[0])", "def test_aws_service_api_public_image_get(self):\n pass", "def test_getThumbnail(self):\n cases = [\n (self.test_eac + 'NE00001.xml', False),\n (self.test_eac + 'NE00100.xml', True),\n (self.test_eac + 'NE01101.xml', True),\n (self.test_eac + 'NE01400.xml', True),\n ]\n for case in cases:\n source, expected = case\n doc = EacCpf.EacCpf(source, 'http://www.findandconnect.gov.au')\n self.assertNotEqual(None, doc)\n result = doc.getThumbnail()\n self.assertEqual(expected, result != None)", "def test_aws_service_api_private_images_get(self):\n pass", "def getimgs():", "def main():\n images = Images()\n #print images.create_image_urls()\n print images.get_image_random()\n print images.get_image(12)", "def test_is_image(self):\n os.chdir(\"testimages/\")\n self.assertTrue(fileactions.is_image(\"arch_001.jpg\"))\n self.assertFalse(fileactions.is_image(\"not_an_image.jpg\"))", "def test_getImages(self): # GIVEN the group chat has at least one image\n testBot = bot.Bot(os.environ['bot_id'], os.environ['token'], os.environ['group_ID'])\n imageList = testBot.run() #AND THEN post_images calls the private get_images method which returns an array\n self.assertTrue(len(imageList) > 0) #THEN there should be at least one element in the array", "def test_basic_run(self):\n self.expect_datatore_lookup('SomeBlobKey', True)\n self.expect_open_image('SomeBlobKey', (1600, 1200))\n self.expect_resize(blob_image._DEFAULT_SERVING_SIZE)\n self.expect_encode_image('SomeImageInJpeg')\n self.run_request('image/jpeg', 'SomeImageInJpeg')", "def testImagesPresent(self):\n\n result = self.app.get('/')\n\n images = result.html.find_all('img')\n\n # expect to find three images\n self.assertEqual(3, len(images), \"Wrong number of images found\")\n\n flowtows = result.html.find_all(class_='flowtow')\n\n image_list = self.images\n\n self.assertEqual(3, len(flowtows))\n\n # each contains the image, date, author and likes\n for index in range(3):\n div = flowtows[index]\n (path, date, user, likes) = image_list[index]\n\n self.assertIn(date, div.text)\n self.assertIn(user, div.text)\n # look for the number of likes\n self.assertIn(str(len(likes)+1), div.text, \"expected to find %d likes mentioned in:\\n\\n%s\" % (len(likes), div))\n\n # look for just one image\n img = div.find_all('img')\n self.assertEqual(1, len(img))", "def test_RawRun_imagepaths():\n p1 = r.imagepaths[0]\n path = 'tests/data/synced/r11_07_06c/cam1/img_0001.jpg'\n assert(os.path.samefile(p1, path))\n assert_equal(len(r.imagepaths), 6)", "def test_activity_photos(self):\n activity = self.client.get_activity(152668627)\n self.assertTrue(activity.photo_count > 0)\n photos = list(activity.photos)\n self.assertEqual(len(photos), 1)\n self.assertEqual(len(photos), activity.photo_count)\n self.assertIsInstance(photos[0], 
model.ActivityPhoto)", "def test_photos_sorted(pre_pop_transaction, rover_name, sol, camera):\n from mars_street_view.models import Photo\n data = Photo.get_rov_sol(rover_name, sol)\n photos_by_cam = data.get('photos_by_cam', {})\n photos = photos_by_cam.get(camera, [])\n urls_from_method = [photo.img_src for photo in photos]\n assert urls_from_method == list(sorted(urls_from_method))", "def test_signup_photo(self, mocked_sendy):\n url = '/0/chefs'\n data = {\n 'email': '[email protected]',\n 'password': 'secret',\n 'name': 'John',\n 'surname': 'Doe',\n 'language': 'es',\n 'photo': IMAGES['png'],\n }\n resp = self.client.post(url, data=data)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('auth', resp.data)\n self.assertIn('token', resp.data['auth'])\n # Check that the photo exists\n self.assertTrue(Chefs.objects.last().avatar_photos.all())", "def test_users_photos_view_set_get_successful(self):\n # Create user and data\n access_user = account_models.User.objects.create_user(email='[email protected]', password='pass', username='m')\n user = account_models.User.objects.create_user(email='[email protected]', password='pass', username='aov_hov')\n category = photo_models.PhotoClassification.objects\\\n .create_or_update(name='Test', classification_type='category')\n\n photo1 = photo_models.Photo(image=Photo(open('apps/common/test/data/photos/photo1-min.jpg', 'rb')), user=user)\n photo1.save()\n photo1.category.set([category])\n photo1.save()\n\n photo2 = photo_models.Photo(image=Photo(open('apps/common/test/data/photos/photo2-min.jpg', 'rb')), user=user)\n photo2.save()\n photo2.category.set([category])\n photo2.save()\n\n # Simulate auth\n token = test_helpers.get_token_for_user(access_user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/users/{}/photos'.format(user.id), format='json')\n result = request.data['results']\n\n self.assertEquals(request.status_code, 200)\n self.assertEquals(len(result), 2)\n self.assertEquals(result[0]['id'], photo2.id) # newest first\n self.assertIn('dimensions', result[0])\n self.assertIn('image_blurred', result[0])\n self.assertIn('image', result[0])", "def test_aws_service_api_private_image_get(self):\n pass" ]
[ "0.7743782", "0.6829301", "0.67836845", "0.6772251", "0.6643747", "0.6615853", "0.65598196", "0.6522996", "0.6504507", "0.6496941", "0.6473579", "0.64030343", "0.63989407", "0.6277306", "0.6260105", "0.622099", "0.6200895", "0.61987615", "0.6191987", "0.61844575", "0.61583775", "0.61420614", "0.61278677", "0.60738564", "0.6042478", "0.6040225", "0.60303795", "0.6006741", "0.6003057", "0.5996467" ]
0.7300395
1
Test registration with facebook
def test_signup_facebook(self, mocked_facebook, mocked_sendy): url = '/0/chefs' data = { 'email': '[email protected]', 'fb_access_token': 'TOKEN', 'name': 'John', 'surname': 'Doe', 'language': 'es', } mocked_facebook.return_value = { 'id': '1401481816739108', 'email': '[email protected]' } resp = self.client.post(url, data=data) self.assertEqual(resp.status_code, 200) # Test that we can log in url = '/0/chefs/login' data = { 'email': '[email protected]', 'fb_access_token': 'TOKEN', } resp = self.client.post(url, data=data) self.assertEqual(resp.status_code, 200) # Test that we cannot log in without password data_set = [ {'email': '[email protected]'}, {'email': '[email protected]', 'password': ''}] for data in data_set: resp = self.client.post(url, data=data) self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_signup_login_facebook(self, mocked_facebook, mocked_sendy):\n url = '/0/chefs'\n data = {\n 'email': '[email protected]',\n 'fb_access_token': 'TOKEN',\n 'name': 'John',\n 'surname': 'Doe',\n 'language': 'es',\n }\n mocked_facebook.return_value = {\n 'id': '1401481816739108',\n 'email': '[email protected]'\n }\n resp = self.client.post(url, data=data)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('auth', resp.data)\n self.assertIn('token', resp.data['auth'])\n\n resp = self.client.post(url, data=data)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('auth', resp.data)\n self.assertIn('token', resp.data['auth'])", "def test_valid_registration(self):\n r = dict(\n email='[email protected]',\n username='crow',\n password='I_do_not_caw',\n confirm_password='I_do_not_caw',\n first_name='magpie',\n last_name='corvid'\n )\n resp = self.client.post('/user/register', data=r, follow_redirects=True)\n self.assertEquals(resp.status_code, 200)", "def test_fb(self, username, password):\n self.driver.get('https://www.facebook.com/')\n name = self.driver.find_element(By.XPATH, \"//input[@id='email']\")\n pass_word = self.driver.find_element(By.XPATH, \"//input[@id='pass']\")\n name.send_keys(username)\n pass_word.send_keys(password)", "def test_0050_registration_post_1(self):\n response = self.fetch(\n '/registration', method=\"POST\", follow_redirects=False,\n body=urlencode({'name':'anoop', 'email':'[email protected]',\n 'password':'openlabs', 'confirm_password':'wrong'}\n )\n )\n self.assertEqual(response.code, 200)\n self.assertEqual(\n response.body.count(\n u'There were error(s) in processing your registration.'\n ), 1\n )", "def test_register_new_user(self):\n with self.client:\n response = self.client.post(\n url_for('register'),\n data=dict(\n first_name='Admin',\n last_name='Admin',\n email='[email protected]',\n password='admin2016',\n confirm_password='admin2016'\n ),\n follow_redirects=True\n )\n self.assertEqual(response.status_code, 200)", "def test_user_registration(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(1,result,\"User registration successful\")", "def test_40_facebook_oauth_creation(self):\r\n fake_response = {\r\n u'access_token': u'access_token',\r\n u'token_type': u'Bearer',\r\n u'expires_in': 3600,\r\n u'id_token': u'token'}\r\n\r\n fake_user = {\r\n u'username': u'teleyinex',\r\n u'first_name': u'John',\r\n u'last_name': u'Doe',\r\n u'verified': True,\r\n u'name': u'John Doe',\r\n u'locale': u'en_US',\r\n u'gender': u'male',\r\n u'email': u'[email protected]',\r\n u'quotes': u'\"quote',\r\n u'link': u'http://www.facebook.com/johndoe',\r\n u'timezone': 1,\r\n u'updated_time': u'2011-11-11T12:33:52+0000',\r\n u'id': u'11111'}\r\n\r\n self.register()\r\n self.signout()\r\n\r\n from pybossa.view import facebook\r\n response_user = facebook.manage_user(fake_response['access_token'],\r\n fake_user, None)\r\n\r\n assert response_user is None, response_user", "def test_register(self):\n users = User.objects.filter(username='test')\n self.assertTrue(len(users) == 0)\n\n username = \"test3\"\n data = {'username': username, 'password': \"123test\", 'email': '[email protected]',\n 'newsletter': 'false', 'research': 'true', 'device': self.device}\n\n response = self.requestRegistration(data)\n\n self.assertTrue('client_id' in response.data)\n self.assertTrue(not 'password' in response.data)\n\n users = User.objects.filter(username=username)\n 
self.assertTrue(len(users) == 1)\n user = users[0]\n profile = user.user_profile\n self.assertTrue(profile.research)\n self.assertFalse(profile.newsletter)\n\n phone = Device.objects.get(user=user)\n\n self.assertTrue(phone.uuid == self.uuid)\n self.assertTrue(phone.cordova == self.device['cordova'])", "def test_39_facebook_oauth_creation(self):\r\n fake_response = {\r\n u'access_token': u'access_token',\r\n u'token_type': u'Bearer',\r\n u'expires_in': 3600,\r\n u'id_token': u'token'}\r\n\r\n fake_user = {\r\n u'username': u'teleyinex',\r\n u'first_name': u'John',\r\n u'last_name': u'Doe',\r\n u'verified': True,\r\n u'name': u'John Doe',\r\n u'locale': u'en_US',\r\n u'gender': u'male',\r\n u'email': u'[email protected]',\r\n u'quotes': u'\"quote',\r\n u'link': u'http://www.facebook.com/johndoe',\r\n u'timezone': 1,\r\n u'updated_time': u'2011-11-11T12:33:52+0000',\r\n u'id': u'11111'}\r\n\r\n from pybossa.view import facebook\r\n response_user = facebook.manage_user(fake_response['access_token'],\r\n fake_user, None)\r\n\r\n user = db.session.query(User).get(1)\r\n\r\n assert user.email_addr == response_user.email_addr, response_user", "def test_successful_registration(self):\n response = self.client.post('/o/register', {\n 'email': '[email protected]',\n 'password': '123new',\n 'terms_acceptance': True,\n })\n self.assertRedirects(\n response,\n settings.ANGULAR_ROOT,\n 302,\n fetch_redirect_response=False,\n )\n self.assertEqual(User.objects.all().count(), 1)", "def test_register(self):\n url = reverse('profiles-register')\n data = {'name': 'test',\n 'user': {'full_name': 'test user',\n 'email': '[email protected]',\n 'password_1': 'test',\n 'password_2': 'test',\n },\n 'username_available': \"Available\"\n }\n with self.settings(MAILER_EMAIL_BACKEND=\"mailer.tests.TestMailerEmailBackend\"):\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertTrue(response.data.has_key('token'))\n self.assertEqual(Message.objects.count(), 1)", "def test_01_account_register(self):\n self.register()\n self.assertEquals(\n self.selenium.current_url, self.get_absolute_url())\n print 'test_register_valid_password completed'", "def test_registration(self):\n\n print(\" --------------------------- Test 1 - Registration ----------------------------\")\n user_id = uuid.uuid4()\n password = \"my-precious\"\n currency = \"EUR\"\n\n response = register_user(user_id, password, currency)\n data = response.json()['message']\n self.assertEqual(response.json()['code'], 201)\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(response.headers['Content-Type'] == 'application/json')\n print(json.dumps(data, indent=4))", "def test_registration(self):\n response = self.client_app.post(\n '/api/v1/auth/signup/',\n data=json.dumps(dict(\n last_name='james',\n email='[email protected]',\n password='123456sddfdf'\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['email'] == '[email protected]')\n self.assertTrue(data['first_name'] is None)\n self.assertTrue(data['last_name'] == 'james')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 201)", "def test_registration(self):\n # Make sure that specific user doesn't exist\n # before registration\n with self.assertRaises(Profile.DoesNotExist):\n Profile.objects.get(username='temp2')\n self.assertEqual(len(mail.outbox), 0)\n response = 
self.client.post(reverse('users:registration'), data={\n 'username': 'temp2',\n 'email': '[email protected]',\n 'password1': 'hardpwd123',\n 'password2': 'hardpwd123',\n }, follow=True)\n\n # And after registration, the user is created\n created_user = Profile.objects.get(username='temp2')\n token = created_user.email_verification.token\n\n # Make sure that client see success message\n soup = BeautifulSoup(response.content, 'html.parser')\n success_msg = soup.find('p', 'success-registration')\n\n self.assertEqual(\n success_msg.text,\n ('We sent email confirmation link to your'\n ' email box. (Don\\'t forget to check spam box)')\n )\n\n # Check for sent email.\n \n self.assertEqual(len(mail.outbox), 1)\n self.assertIn('Follow this link to confirm your email address:', mail.outbox[0].body)", "def test_user_registration(self):\n response = self.client.post(SIGNUP_URL,\n data=json.dumps(self.user_data), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n result = json.loads(response.data.decode())\n self.assertEqual(result[\"message\"],\n \"registration successful, now login\")", "def test_register(self):\n selenium = self.selenium\n # Opening the link we want to test\n selenium.get(self.live_server_url + '/account/signup/')\n # find the form element\n username = selenium.find_element_by_id('id_username')\n password1 = selenium.find_element_by_id('id_password1')\n password2 = selenium.find_element_by_id('id_password2')\n submit = selenium.find_element_by_xpath('//button[text()=\"Sign up\"]')\n\n # Fill the form with data\n username.send_keys('some username')\n password1.send_keys('123456')\n password2.send_keys('123456')\n\n # submitting the form\n submit.click()\n\n # check the returned result\n assert 'Username' in selenium.page_source", "def test_facebook_login(self):\n response = self.client.get('/login/facebook/')\n self.assertEqual(response.status_code, 302)", "def test_register_user(self):\n response = self.signup_a_user(self.user_data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data['message'],\n \"User successfully created. 
Check email for \"\n \"verification link\")", "def test_user_registeration(self):\n with self.client:\n response = self.client.post('/users/signup', data=dict(\n username='tigarcia',password='moxies',name=\"Tim\",email=\"[email protected]\"\n ), follow_redirects=True)\n self.assertIn(b'Welcome', response.data)\n self.assertTrue(current_user.username == \"tigarcia\")\n # make sure we hash the password!\n self.assertNotEqual(current_user.password, \"moxies\")\n self.assertTrue(current_user.is_authenticated)", "def test_0060_registration_post_2(self):\n user = User(name=\"Test User\", email=\"[email protected]\")\n user.set_password(\"password\")\n user.save()\n response = self.fetch(\n '/registration', method=\"POST\", follow_redirects=False,\n body=urlencode({'name':'anoop', 'email':'[email protected]',\n 'password':'openlabs', 'confirm_password':'openlabs'}\n )\n )\n self.assertEqual(response.code, 200)\n self.assertEqual(\n response.body.count(u'This email is already registered.'), 1\n )", "def test_signup(self):\n res = self.client.get(\"/registration\")\n data = res.data.decode(\"utf-8\")\n assert res.status == \"200 OK\"\n assert \"Create Account\" in data", "def test_registration(self):\n response = self.client.post(\n '/api/v1/auth/register',\n data=json.dumps(dict(\n username='joe',\n password='123456'\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Successfully registered.')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 201)", "def test_get_chef_facebook_id(self):\n url = '/0/facebook'\n\n self.user.fb_user_id = 'FB_ID'\n self.user.fb_access_token = 'TOKEN'\n self.user.save()\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.data, {'response': {'fb_user_id': 'FB_ID'}})", "def test_register(self):\n # Register good data\n data = mock_data['register']\n data = json.dumps(data)\n response = self.client.post(\n 'api/v2/auth/signup', content_type=\"application/json\", data=data)\n data = json.loads(response.data)\n self.assertEqual(data['message'], 'User registered')\n self.assertEqual(response.status_code, 200)\n self.assertTrue('user' in data)", "def test_register(self):\n\t\turl = '/register/'\n\t\tdata = {'username' : 'testUser1234', 'password' : 'pass12345'} # The amazing password...\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(User.objects.count(), 2)\n\t\t# Every user must be created with its Profile\n\t\tself.assertEqual(Profile.objects.count(), 1)\n\t\t# The initial user doesn't have a Profile. 
So it has to be 1.\n\t\tself.assertEqual(User.objects.get(username='testUser1234').username, 'testUser1234')", "def create_test_user(self):\n response = urllib2.urlopen('https://graph.facebook.com/%s/accounts/test-users?installed=true&name=test_acc&locale=en_US&permissions=publish_stream&method=post&access_token=%s' % (self.consumer_key, self.token))", "def test_register(self):\n u = User(first_name = \"David\",\n last_name = 'Smith',\n password='******',\n email='[email protected]',\n phone_number='012-345-6789')\n response = self.register({\n 'first_name': u.first_name,\n 'last_name': u.last_name,\n 'password': u.password,\n 'email': u.email,\n 'phone_number': u.phone_number\n })\n self.assertEqual(response.status_code, 302)\n response = self.client.get(response.url)\n self.assertDictEqual(response.json(), self.client.get(reverse('backend:user_details', args=(response.json()['pk'],))).json())", "def test_facebook_login(self, get_object):\n get_object.return_value = self.return_val\n self.test_client().post(self.facebook_url, self.facebook_token, format=\"json\")\n response = self.client.post(self.facebook_url,\n self.facebook_token, format=\"json\")\n self.assertEqual(response.status_code, 200)", "def test_register(self):\n client = APIClient()\n payload = {\n 'email': self.email,\n 'first_name': self.first_name,\n 'last_name': self.last_name,\n 'oauth_id': self.oauth_id,\n 'password': self.password\n }\n response = client.post('/users/', payload, format='json')\n\n # Check the response status code\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n # Check the user was correctly created directly in the database\n user = User.objects.get(email=self.email)\n self.assertNotEqual(user.password, self.password)\n self.assertTrue(user.check_password(self.password))\n\n # Check the user was correctly created by doing an HTTP query\n response = client.get('/users/').json()\n self.assertEqual(len(response), 1)\n self.assertEqual(response[0]['email'], self.email)\n self.assertFalse('password' in response[0])" ]
[ "0.7900534", "0.741936", "0.73794997", "0.7275501", "0.7215751", "0.72008", "0.7192416", "0.7087651", "0.707313", "0.7060629", "0.70167506", "0.70075756", "0.69956833", "0.69721985", "0.6927428", "0.6919743", "0.6918986", "0.6916858", "0.69100404", "0.6909286", "0.6903661", "0.6903323", "0.690055", "0.68747467", "0.68680084", "0.6866145", "0.6846124", "0.6836494", "0.683483", "0.682687" ]
0.77579355
1
Test registration with a photo
def test_signup_photo(self, mocked_sendy):
    url = '/0/chefs'
    data = {
        'email': '[email protected]',
        'password': 'secret',
        'name': 'John',
        'surname': 'Doe',
        'language': 'es',
        'photo': IMAGES['png'],
    }
    resp = self.client.post(url, data=data)
    self.assertEqual(resp.status_code, 200)
    self.assertIn('auth', resp.data)
    self.assertIn('token', resp.data['auth'])
    # Check that the photo exists
    self.assertTrue(Chefs.objects.last().avatar_photos.all())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_user_get_registered(self):\n img = BytesIO(b'images/Screen_Shot_2019-12-24_at_12.33.34.png')\n img.name = \"myimage.png\"\n url = reverse(\"register_user\")\n response = self.client.post(url, { \"username\": \"janedoe\", \"email\":\"[email protected]\", \"password\":\"123\", \"photo\": img})\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, reverse(\"login_user\"), 302)", "def test_user_photo_creation_succeeds(self):\n\t\timage_name = 'test1.png'\n\t\twith open(self.img_url, 'rb') as image:\t\t\t\n\t\t\tdata = {'image': image, 'name':image_name}\t\t\t\n\t\t\tresponse = self.client.post(reverse('photos'), data)\n\t\t\timage.close()\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(response.data.get('name'), image_name)", "def test_upload_photo(self):\n\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.normal_token.key)\n\n url = reverse('crew-api:upload-photo', args=[self.normal_user.crew.uuid])\n\n photo_file = self.generate_photo_file()\n\n data = {\n 'photo':photo_file\n }\n\n response = self.client.post(url, data, format='multipart')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_logged_in_user_doesnt_register(self):\n img = BytesIO(b'images/Screen_Shot_2019-12-24_at_12.33.34.png')\n img.name = \"myimage.png\"\n user = User.objects.create_user(username=\"jdoe\", email=\"[email protected]\", password=\"123\", photo=img.name)\n self.client.login(username = \"jdoe\", password=\"123\")\n url = reverse(\"register_user\")\n response = self.client.post(url, { \"username\": \"\", \"email\":\"[email protected]\", \"password\":\"123\", \"photo\":img.name})\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, reverse(\"home\"), 302)", "def test_create_photo(self, api_client, test_user):\n photo_1 = sample_photo(user=test_user, title='Home')\n\n payload = {\n 'title': 'Home'\n }\n api_client.force_authenticate(test_user)\n res = api_client.post(PHOTO_URL, payload)\n assert res.status_code == status.HTTP_201_CREATED", "def test_procedure_picture_upload(self):\n image_upload_url = PROCEDURE_URL\n\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n\n payload = {\n 'name': 'temp',\n 'speciality': [self.speciality.pk],\n 'image': ntf,\n 'overview': 'bla bla bla'\n }\n\n res = self.client.post(\n image_upload_url,\n payload,\n format=\"multipart\"\n )\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n self.assertIn('image', res.data)", "def test_upload_profile_pic(self):\n url = 'https://cdn.business2community.com/wp-content/uploads/2017/08/blank-profile-picture-973460_640.png'\n\n details = self.new_user.upload_profile_pic(url)\n\n self.assertEqual(self.new_user.profile_pic, details.get('url'))\n destroy(details.get('public_id'))\n\n # Test if invalid image path is inserted\n with self.assertRaises(Exception):\n details = self.new_user.upload_profile_pic('Random path')\n self.assertEqual(self.new_user.profile_pic, details.get('url'))", "def test_add_new_photo(self):\n\n result = self.client.post(\"/add_new_photo/2\",\n data={\"park_id\": \"2\", \"user_id\": 25, \"description\": \"Taurus having a great time!\", \"photo\": \"http://s3-media3.fl.yelpcdn.com/bphoto/ZUhteyc0UWdWbgM-4VbZhw/o.jpg\"},\n follow_redirects=True)\n self.assertIn(\"<h1 class=\\\"title\\\">Bark Park!</h1>\", result.data)", "def test_create(self):\n data = {\n 'image': images.load_image()\n 
}\n photo = Photo.objects.create(**data)\n self.assertTrue(photo.pk)", "def test_exists(self):\n self.assertTrue(bool(self.photo))", "def test_save_profile_with_existing_photo(self):\n # Set a user with a photo\n user = UserFactory.create()\n file_path = os.path.join(os.path.dirname(__file__), \"normal_photo.jpg\")\n self._upload_photo(user, file_path)\n\n # Re-save profile without uploading a new photo.\n data = {\n \"full_name\": user.userprofile.full_name,\n \"email\": user.email,\n \"username\": user.username,\n \"lat\": 40.005814,\n \"lng\": -3.42071,\n \"externalaccount_set-MAX_NUM_FORMS\": \"1000\",\n \"externalaccount_set-INITIAL_FORMS\": \"0\",\n \"externalaccount_set-TOTAL_FORMS\": \"0\",\n \"language_set-MAX_NUM_FORMS\": \"1000\",\n \"language_set-INITIAL_FORMS\": \"0\",\n \"language_set-TOTAL_FORMS\": \"0\",\n \"basic_section\": \"\",\n }\n\n for field in UserProfilePrivacyModel._meta.fields:\n data[field.name] = MOZILLIANS\n data[\"privacy_tshirt\"] = PRIVATE\n\n with override_script_prefix(\"/en-US/\"):\n url = reverse(\"phonebook:profile_edit\")\n with self.login(user) as client:\n response = client.post(url, data=data, follow=True)\n eq_(response.status_code, 200)", "def test_user_profile_picture_invalid_image_fails(self):\n image_upload_url = PROCEDURE_URL\n\n payload = {\n 'name': 'temp',\n 'speciality': [self.speciality.pk],\n 'image': 'invalid image',\n 'overview': 'bla bla bla'\n }\n\n res = self.client.post(\n image_upload_url,\n payload,\n format=\"multipart\"\n )\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_image(self):\n pass", "def create_and_submit(self, username):\r\n user = UserFactory.create()\r\n attempt = SoftwareSecurePhotoVerification(user=user)\r\n user.profile.name = username\r\n attempt.upload_face_image(\"Fake Data\")\r\n attempt.upload_photo_id_image(\"More Fake Data\")\r\n attempt.mark_ready()\r\n attempt.submit()\r\n return attempt", "def main(uid, password, photo, users_service, auth_service, photos_service):\n user = users_service.get_user_by_id(uid)\n auth_service.authenticate(user, password)\n photos_service.upload_photo(user['uid'], photo)", "def create_and_submit(self):\r\n user = UserFactory.create()\r\n attempt = SoftwareSecurePhotoVerification(user=user)\r\n user.profile.name = u\"Rust\\u01B4\"\r\n\r\n attempt.upload_face_image(\"Just pretend this is image data\")\r\n attempt.upload_photo_id_image(\"Hey, we're a photo ID\")\r\n attempt.mark_ready()\r\n attempt.submit()\r\n\r\n return attempt", "def test_user_photo_retrieval_by_name_succeeds(self):\n\t\t# url = reverse('photodetail')\t\n\t\tself.created_image = UserPhoto(image=self.image, name=self.image_name, created_by=self.user)\n\t\tself.created_image.save()\n\t\tresponse = self.client.get('/api/image/?name={}'.format(self.created_image.name))\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data.get('name'), self.image_name)\n\t\t\n\t\tos.remove('static/media/' + str(self.created_image.image))\n\t\tdel self.created_image", "def test_create_image_signature(self):\n pass", "def test_upload(self):\n with self.client:\n file = dict(\n file=(BytesIO(b'my file contents'), \"foto.jpg\"),\n )\n response = self.client.post('/upload',\n content_type='multipart/form-data',\n data=file)\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertIn('foto.jpg', os.listdir(PHOTOS_SAVE_PATH))\n self.assertIn('foto.jpg', [photo.filename for photo in Photo.query.all()])", "def 
test_user_photo_retrieval_by_id_succeeds(self):\n\t\t# url = reverse('photodetail')\t\n\t\tself.created_image = UserPhoto(image=self.image, name=self.image_name, created_by=self.user)\n\t\tself.created_image.save()\n\t\tresponse = self.client.get('/api/modify_photo/?id={}'.format(self.created_image.id))\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data.get('name'), self.image_name)\n\t\tos.remove('static/media/' + str(self.created_image.image))\n\t\tdel self.created_image", "def test_album_image_user(self):\n self.assertEqual(self.album.user, self.photo.user)", "def test_upload_image(self):\n url = image_upload_url(self.recipe.id)\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0) # go back to begining of file_path\n res = self.client.post(url, {'image': ntf, format: 'multipart'})\n\n self.recipe.refresh_from_db()\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn('image', res.data)\n self.assertTrue(os.path.exists(self.recipe.image.path))", "def test_retrieval_of_user_photos(self):\t\n\t\tget_response = self.client.get(reverse('photos'))\n\n\t\tself.assertEqual(get_response.status_code, status.HTTP_200_OK)\n\t\tdata = [i.values() for i in get_response.data]\n\t\tself.assertIn(u'{}'.format(self.image_name), data[0])", "def test_user_registration(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(1,result,\"User registration successful\")", "def test_upload_image_to_reteta(self):\n url = image_upload_url(self.reteta.id)\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(url, {'image': ntf}, format='multipart')\n self.reteta.refresh_from_db()\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn('image', res.data)\n self.assertTrue(os.path.exists(self.reteta.image.path))", "def post(self, request, *args, **kwargs):\n form = RegisterForm(request.POST)\n image = request.FILES.get('image')\n\n if (form.is_valid()):\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password2')\n\n user = User.objects.create(username=username, password=password)\n profileImage = ProfileImage.objects.create(user=user, image=image)\n\n login(request, user)\n\n return redirect('/app')\n \n else:\n print(form.errors)\n return render(request, 'sign_up.html', { 'form': form })", "def test_uploading_image_to_recipe(self):\n url = image_upload_url(self.recipe.id)\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB',(10,10))\n img.save(ntf,format='JPEG')\n ntf.seek(0)\n res = self.client.post(url, {'image': ntf}, format='multipart')\n\n self.recipe.refresh_from_db()\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn('image', res.data)\n self.assertTrue(os.path.exists(self.recipe.image.path))", "def add_profile_photo():\n pass", "def test_create_photo_album(self, api_client, test_user):\n photo_1 = sample_photo(user=test_user, title='Home')\n photo_2 = sample_photo(user=test_user, title='Work')\n\n payload = {\n 'title': 'Holidays in LA',\n 'photos': [str(photo_1.id), str(photo_2.id)],\n }\n\n api_client.force_authenticate(test_user)\n res = api_client.post(PHOTO_ALBUM_URL, payload)\n print(res.data)\n assert res.status_code == status.HTTP_201_CREATED\n album = 
PhotoAlbum.objects.get(id=res.data['id'])\n photos = album.photos.all()\n assert photos.count() == 2", "def test_valid_registration(self):\n r = dict(\n email='[email protected]',\n username='crow',\n password='I_do_not_caw',\n confirm_password='I_do_not_caw',\n first_name='magpie',\n last_name='corvid'\n )\n resp = self.client.post('/user/register', data=r, follow_redirects=True)\n self.assertEquals(resp.status_code, 200)" ]
[ "0.7926338", "0.75307405", "0.7283007", "0.7208024", "0.71915627", "0.71540934", "0.68165654", "0.68001765", "0.6796672", "0.674159", "0.6666542", "0.6594835", "0.6593995", "0.6576625", "0.6517541", "0.6502099", "0.6501964", "0.6443999", "0.64113736", "0.6374036", "0.6367903", "0.6360116", "0.6354215", "0.633739", "0.6334598", "0.6310001", "0.6302298", "0.62902015", "0.6255117", "0.6248096" ]
0.7955134
0
Test get chef's facebook id
def test_get_chef_facebook_id(self):
    url = '/0/facebook'

    self.user.fb_user_id = 'FB_ID'
    self.user.fb_access_token = 'TOKEN'
    self.user.save()

    resp = self.client.get(url)
    self.assertPermissionDenied(resp)

    headers = self.login()
    resp = self.client.get(url, **headers)
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp.data, {'response': {'fb_user_id': 'FB_ID'}})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_set_chef_facebook_id(self):\n url = '/0/facebook'\n\n data = {'fb_user_id': 'FB_ID', 'fb_access_token': 'TOKEN'}\n\n resp = self.client.post(url, data)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n\n # Without data\n resp = self.client.post(url, **headers)\n self.assertEqual(resp.status_code, 400)\n\n resp = self.client.post(url, data, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.data, {'response': {'return': True}})\n self.assertEqual(Chefs.objects.get(pk=self.user.pk).fb_user_id, 'FB_ID')\n self.assertEqual(Chefs.objects.get(pk=self.user.pk).fb_access_token, 'TOKEN')\n\n # Cannot reset while set\n resp = self.client.post(url, data, **headers)\n self.assertEqual(resp.status_code, 400)", "def test_delete_chef_facebook_id(self):\n url = '/0/facebook'\n\n self.user.fb_user_id = 'FB_ID'\n self.user.fb_access_token = 'TOKEN'\n self.user.save()\n\n resp = self.client.delete(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.delete(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.data, {'response': {'return': True}})\n self.assertIsNone(Chefs.objects.get(pk=self.user.pk).fb_user_id)\n self.assertIsNone(Chefs.objects.get(pk=self.user.pk).fb_access_token)", "def test_get_chef_facebook_friends_to_follow(self, mocked_facebook):\n url = '/0/facebook/friends'\n\n mocked_facebook.return_value = {\n 'data': [\n {\n 'name': 'Friend1',\n 'id': 'FB_ID1'\n },\n {\n 'name': 'Friend2',\n 'id': 'FB_ID2'\n },\n {\n 'name': 'Friend3',\n 'id': 'FB_ID3'\n }\n ]\n }\n\n self.user.fb_user_id = 'FB_ID'\n self.user.fb_access_token = 'TOKEN'\n self.user.save()\n\n chef1 = self.create_user('1')\n chef1.fb_user_id = 'FB_ID1'\n chef1.save()\n\n self.user.follow(chef1)\n\n chef2 = self.create_user('2')\n chef2.fb_user_id = 'FB_ID2'\n chef2.save()\n\n chef3 = self.create_user('3')\n chef3.fb_user_id = 'FB_NON_FRIEND'\n chef3.save()\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('friends', resp.data)\n self.assertEqual(1, len(resp.data['friends']))\n self.assertEqual(chef2.email, resp.data['friends'][0]['email'])", "def test_facebook_login(self):\n response = self.client.get('/login/facebook/')\n self.assertEqual(response.status_code, 302)", "def test_meme_meme_id_get(self):\n pass", "def test_facebook_login(self, get_object):\n get_object.return_value = self.return_val\n self.test_client().post(self.facebook_url, self.facebook_token, format=\"json\")\n response = self.client.post(self.facebook_url,\n self.facebook_token, format=\"json\")\n self.assertEqual(response.status_code, 200)", "def search_username(ausername):\n print \"Searching: \" + ausername + \" -> \",\n url = \"http://www.findmyfbid.com/\"\n post_data = \"https://www.facebook.com/\" + ausername\n user_agent = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:42.0) Gecko/20100101 Firefox/42.0\"\n headers = {'User-Agent': user_agent}\n req = requests.post(url, headers=headers, data = { \"url\": post_data})\n html_data = req.text\n soup = BeautifulSoup(html_data, 'html.parser')\n resp = str(soup.code)\n ugly1 = resp.split(\">\")\n ugly2 = ugly1[1].split(\"<\")\n if resp == \"<code>https://www.facebook.com</code>\":\n print \"No ID found :(\"\n else:\n print ugly2[0]", "def test_user_id_get(self):\n pass", "def test_id(self):\n result = self.test_client.id\n\n assert result == 
\"86576599\"", "def test_id(self):\n result = self.test_client.id\n\n assert result == \"10423098\"", "def test_signup_login_facebook(self, mocked_facebook, mocked_sendy):\n url = '/0/chefs'\n data = {\n 'email': '[email protected]',\n 'fb_access_token': 'TOKEN',\n 'name': 'John',\n 'surname': 'Doe',\n 'language': 'es',\n }\n mocked_facebook.return_value = {\n 'id': '1401481816739108',\n 'email': '[email protected]'\n }\n resp = self.client.post(url, data=data)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('auth', resp.data)\n self.assertIn('token', resp.data['auth'])\n\n resp = self.client.post(url, data=data)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('auth', resp.data)\n self.assertIn('token', resp.data['auth'])", "def test_get_by_id(self):\n with self.client:\n self.client.post('/users/login', data=dict(\n username=\"eschoppik\", password='secret'\n ), follow_redirects=True)\n self.assertTrue(current_user.id == 1)\n self.assertFalse(current_user.id == 20)", "def test_getextid(self):\n crusoe = self.fixtures.crusoe\n email = crusoe.email.email\n service_facebook = 'facebook'\n\n externalid = models.UserExternalId( # NOQA: S106\n service=service_facebook,\n user=crusoe,\n userid=crusoe.email.email,\n username=crusoe.email.email,\n oauth_token=environ.get('FACEBOOK_OAUTH_TOKEN'),\n oauth_token_type='Bearer',\n )\n\n db.session.add(externalid)\n db.session.commit()\n result = models.getextid(service_facebook, userid=email)\n self.assertIsInstance(result, models.UserExternalId)\n assert '<UserExternalId {service}:{username} of {user}>'.format(\n service=service_facebook, username=email, user=repr(crusoe)[1:-1]\n ) in repr(result)", "def test_fb(self, username, password):\n self.driver.get('https://www.facebook.com/')\n name = self.driver.find_element(By.XPATH, \"//input[@id='email']\")\n pass_word = self.driver.find_element(By.XPATH, \"//input[@id='pass']\")\n name.send_keys(username)\n pass_word.send_keys(password)", "def test_39_facebook_oauth_creation(self):\r\n fake_response = {\r\n u'access_token': u'access_token',\r\n u'token_type': u'Bearer',\r\n u'expires_in': 3600,\r\n u'id_token': u'token'}\r\n\r\n fake_user = {\r\n u'username': u'teleyinex',\r\n u'first_name': u'John',\r\n u'last_name': u'Doe',\r\n u'verified': True,\r\n u'name': u'John Doe',\r\n u'locale': u'en_US',\r\n u'gender': u'male',\r\n u'email': u'[email protected]',\r\n u'quotes': u'\"quote',\r\n u'link': u'http://www.facebook.com/johndoe',\r\n u'timezone': 1,\r\n u'updated_time': u'2011-11-11T12:33:52+0000',\r\n u'id': u'11111'}\r\n\r\n from pybossa.view import facebook\r\n response_user = facebook.manage_user(fake_response['access_token'],\r\n fake_user, None)\r\n\r\n user = db.session.query(User).get(1)\r\n\r\n assert user.email_addr == response_user.email_addr, response_user", "def test_signup_facebook(self, mocked_facebook, mocked_sendy):\n url = '/0/chefs'\n data = {\n 'email': '[email protected]',\n 'fb_access_token': 'TOKEN',\n 'name': 'John',\n 'surname': 'Doe',\n 'language': 'es',\n }\n mocked_facebook.return_value = {\n 'id': '1401481816739108',\n 'email': '[email protected]'\n }\n resp = self.client.post(url, data=data)\n self.assertEqual(resp.status_code, 200)\n\n # Test that we can log in\n url = '/0/chefs/login'\n data = {\n 'email': '[email protected]',\n 'fb_access_token': 'TOKEN',\n }\n resp = self.client.post(url, data=data)\n self.assertEqual(resp.status_code, 200)\n\n # Test that we cannot log in without password\n data_set = [\n {'email': '[email protected]'},\n {'email': 
'[email protected]', 'password': ''}]\n for data in data_set:\n resp = self.client.post(url, data=data)\n self.assertEqual(resp.status_code, 400)", "def get_numeric_id(self, user_name):\n url = 'https://www.facebook.com/' + user_name\n self.get(url)\n source = self.page_source\n try:\n match = re.search(r\"profile_id=(\\d*)\", source)\n numeric_id = match.group(1)\n return numeric_id\n except (AttributeError, TypeError, KeyError, ValueError):\n log.error(\"Numeric ID not found, returning 0\")\n return 0", "def fb_id(self):\n social_auth = self.social_auth.latest('id')\n return social_auth.uid", "def test_40_facebook_oauth_creation(self):\r\n fake_response = {\r\n u'access_token': u'access_token',\r\n u'token_type': u'Bearer',\r\n u'expires_in': 3600,\r\n u'id_token': u'token'}\r\n\r\n fake_user = {\r\n u'username': u'teleyinex',\r\n u'first_name': u'John',\r\n u'last_name': u'Doe',\r\n u'verified': True,\r\n u'name': u'John Doe',\r\n u'locale': u'en_US',\r\n u'gender': u'male',\r\n u'email': u'[email protected]',\r\n u'quotes': u'\"quote',\r\n u'link': u'http://www.facebook.com/johndoe',\r\n u'timezone': 1,\r\n u'updated_time': u'2011-11-11T12:33:52+0000',\r\n u'id': u'11111'}\r\n\r\n self.register()\r\n self.signout()\r\n\r\n from pybossa.view import facebook\r\n response_user = facebook.manage_user(fake_response['access_token'],\r\n fake_user, None)\r\n\r\n assert response_user is None, response_user", "def test_login(self):\n self.facebook_page.login()\n self.assertIsNotNone(self.facebook_page.webdriver.find_element_by_name('requests'))", "def test_0001(init_driver):\n login_page = LoginPage(init_driver, base_url=\"https://www.facebook.com\").open()\n home_page = login_page.login(cfg.credentials('email'), cfg.credentials('password'))\n assert home_page.get_title() == Title.FACEBOOK_HOME", "def facebook_login():\n if not facebook.authorized:\n return redirect(url_for('facebook.login'))\n account_info = facebook.get('me?fields=id,name,email')\n# print(account_info)\n if account_info.ok:\n account_info_json = account_info.json()\n user = {}\n user['email'] = account_info_json['email']\n user['firstName'] = account_info_json['name'].split()[0]\n user['lastName'] = account_info_json['name'].split()[1]\n return third_party_user_handler(user['email'],user['firstName'], user['lastName'], 'facebook')", "def test_cyclingleagues_id_get(self):\n pass", "def get_id(share_url):\n url = get_redirect_url(share_url)\n id_num = re.findall('(\\d*)\\?', url)[0]\n if id_num.isnumeric():\n return id_num\n else:\n print(\"Something wrong with id number\")", "def get_facebook_user(cls, client, facebook_id):\n\n try:\n fbu = cls.objects.get(facebook_id__exact=facebook_id, deleted=False)\n except cls.DoesNotExist:\n try:\n user_data = client.obj_id(\n facebook_id,\n fields='id,name,first_name,middle_name,last_name,link,username,gender,locale,picture',\n )\n except FacebookGenericError:\n user_data = client.obj_id(\n facebook_id,\n fields='id,name,link,picture',\n )\n\n person = Person(\n name=user_data[u'name'] if u'name' in user_data else None,\n )\n person.save()\n\n fbu = cls(\n person=person,\n facebook_id=user_data[u'id'],\n name=user_data[u'name'] if u'name' in user_data else None,\n link=user_data[u'link'] if u'link' in user_data else None,\n page=True,\n )\n\n if u'picture' in user_data:\n fbu.picture = user_data[u'picture'][u'data'][u'url']\n else:\n person = Person(\n name=user_data[u'name'] if u'name' in user_data else None,\n gender=user_data[u'gender'] if u'gender' in user_data else None,\n )\n 
person.save()\n\n fbu = cls(\n person=person,\n facebook_id=user_data[u'id'],\n name=user_data[u'name'] if u'name' in user_data else None,\n first_name=user_data[u'first_name'] if u'first_name' in user_data else None,\n middle_name=user_data[u'middle_name'] if u'middle_name' in user_data else None,\n last_name=user_data[u'last_name'] if u'last_name' in user_data else None,\n link=user_data[u'link'] if u'link' in user_data else None,\n username=user_data[u'username'] if u'username' in user_data else None,\n gender=user_data[u'gender'] if u'gender' in user_data else None,\n locale=user_data[u'locale'] if u'locale' in user_data else None,\n picture=user_data[u'picture'][u'data'][u'url'] if u'picture' in user_data else None,\n page=False,\n )\n\n fbu.save()\n return fbu", "def test_get_adventure_guid(self):\n headers = {\n \"Accept\": \"application/json\",\n }\n response = self.client.open(\n \"/adventure/{adventure_id}\".format(adventure_id=\"adventure_id_example\"),\n method=\"GET\",\n headers=headers,\n )\n self.assert200(response, \"Response body is : \" + response.data.decode(\"utf-8\"))", "def test_get_user_id(self):\n print('(' + self.test_get_user_id.__name__+')',\n self.test_get_user_id.__doc__)\n # for patient\n self.assertEqual(\n PATIENT_ID, self.connection.get_user_id(PATIENT_USERNAME))\n # for doctor\n self.assertEqual(\n DOCTOR_ID, self.connection.get_user_id(DOCTOR_USERNAME))", "def test_sport_id(self):\n result = self.test_client.sport_id\n\n assert result == \"1\"", "def test_get_remote_id_from_data(self):\n data = {\"key\": \"/work/OL1234W\"}\n result = self.connector.get_remote_id_from_data(data)\n self.assertEqual(result, \"https://openlibrary.org/work/OL1234W\")\n # error handling\n with self.assertRaises(ConnectorException):\n self.connector.get_remote_id_from_data({})", "def get_facebook_token(request):\n url = request.query_params.get('url', None)\n if url is None:\n raise ValidationError(\"No callback URL specified\")\n\n user_id = request.query_params.get('user', None)\n if user_id is None:\n raise ValidationError(\"No user specified on the URL\")\n\n academy = request.query_params.get('a', None)\n if academy is None:\n raise ValidationError(\"No academy specified on the URL\")\n\n url = base64.b64decode(url).decode(\"utf-8\")\n # Missing scopes!! admin.invites:write, identify\n scopes = (\"email\",\n \"ads_read\", \"business_management\", \"leads_retrieval\", \"pages_manage_metadata\", \"pages_read_engagement\",\n )\n query_string = f'a={academy}&url={url}&user={user_id}'.encode(\"utf-8\")\n payload = str(base64.urlsafe_b64encode(query_string), \"utf-8\")\n params = {\n \"client_id\": os.getenv('FACEBOOK_CLIENT_ID', \"\"),\n \"redirect_uri\": os.getenv('FACEBOOK_REDIRECT_URL', \"\"),\n \"scope\": \",\".join(scopes),\n \"state\": payload\n }\n redirect = \"https://www.facebook.com/v8.0/dialog/oauth?\"\n for key in params:\n redirect += f\"{key}={params[key]}&\"\n\n if settings.DEBUG:\n return HttpResponse(f\"Redirect to: <a href='{redirect}'>{redirect}</a>\")\n else:\n return HttpResponseRedirect(redirect_to=redirect)" ]
[ "0.73383886", "0.69110185", "0.65447694", "0.63805336", "0.62422544", "0.62227225", "0.6216214", "0.6182178", "0.6166742", "0.6134784", "0.6118882", "0.6092159", "0.6026129", "0.6008636", "0.59688044", "0.596521", "0.5961509", "0.5940854", "0.58199996", "0.5752718", "0.5690613", "0.5657574", "0.56487745", "0.56325203", "0.56114125", "0.55990386", "0.55909747", "0.558376", "0.55705637", "0.55591196" ]
0.8471372
0
Test set chef's facebook id
def test_set_chef_facebook_id(self):
    url = '/0/facebook'

    data = {'fb_user_id': 'FB_ID', 'fb_access_token': 'TOKEN'}

    resp = self.client.post(url, data)
    self.assertPermissionDenied(resp)

    headers = self.login()

    # Without data
    resp = self.client.post(url, **headers)
    self.assertEqual(resp.status_code, 400)

    resp = self.client.post(url, data, **headers)
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp.data, {'response': {'return': True}})
    self.assertEqual(Chefs.objects.get(pk=self.user.pk).fb_user_id, 'FB_ID')
    self.assertEqual(Chefs.objects.get(pk=self.user.pk).fb_access_token, 'TOKEN')

    # Cannot reset while set
    resp = self.client.post(url, data, **headers)
    self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_chef_facebook_id(self):\n url = '/0/facebook'\n\n self.user.fb_user_id = 'FB_ID'\n self.user.fb_access_token = 'TOKEN'\n self.user.save()\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.data, {'response': {'fb_user_id': 'FB_ID'}})", "def test_delete_chef_facebook_id(self):\n url = '/0/facebook'\n\n self.user.fb_user_id = 'FB_ID'\n self.user.fb_access_token = 'TOKEN'\n self.user.save()\n\n resp = self.client.delete(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.delete(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.data, {'response': {'return': True}})\n self.assertIsNone(Chefs.objects.get(pk=self.user.pk).fb_user_id)\n self.assertIsNone(Chefs.objects.get(pk=self.user.pk).fb_access_token)", "def test_get_chef_facebook_friends_to_follow(self, mocked_facebook):\n url = '/0/facebook/friends'\n\n mocked_facebook.return_value = {\n 'data': [\n {\n 'name': 'Friend1',\n 'id': 'FB_ID1'\n },\n {\n 'name': 'Friend2',\n 'id': 'FB_ID2'\n },\n {\n 'name': 'Friend3',\n 'id': 'FB_ID3'\n }\n ]\n }\n\n self.user.fb_user_id = 'FB_ID'\n self.user.fb_access_token = 'TOKEN'\n self.user.save()\n\n chef1 = self.create_user('1')\n chef1.fb_user_id = 'FB_ID1'\n chef1.save()\n\n self.user.follow(chef1)\n\n chef2 = self.create_user('2')\n chef2.fb_user_id = 'FB_ID2'\n chef2.save()\n\n chef3 = self.create_user('3')\n chef3.fb_user_id = 'FB_NON_FRIEND'\n chef3.save()\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('friends', resp.data)\n self.assertEqual(1, len(resp.data['friends']))\n self.assertEqual(chef2.email, resp.data['friends'][0]['email'])", "def test_meme_meme_id_get(self):\n pass", "def test_id(self):\n result = self.test_client.id\n\n assert result == \"86576599\"", "def test_signup_facebook(self, mocked_facebook, mocked_sendy):\n url = '/0/chefs'\n data = {\n 'email': '[email protected]',\n 'fb_access_token': 'TOKEN',\n 'name': 'John',\n 'surname': 'Doe',\n 'language': 'es',\n }\n mocked_facebook.return_value = {\n 'id': '1401481816739108',\n 'email': '[email protected]'\n }\n resp = self.client.post(url, data=data)\n self.assertEqual(resp.status_code, 200)\n\n # Test that we can log in\n url = '/0/chefs/login'\n data = {\n 'email': '[email protected]',\n 'fb_access_token': 'TOKEN',\n }\n resp = self.client.post(url, data=data)\n self.assertEqual(resp.status_code, 200)\n\n # Test that we cannot log in without password\n data_set = [\n {'email': '[email protected]'},\n {'email': '[email protected]', 'password': ''}]\n for data in data_set:\n resp = self.client.post(url, data=data)\n self.assertEqual(resp.status_code, 400)", "def test_id(self):\n result = self.test_client.id\n\n assert result == \"10423098\"", "def test_user_id_get(self):\n pass", "def test_signup_login_facebook(self, mocked_facebook, mocked_sendy):\n url = '/0/chefs'\n data = {\n 'email': '[email protected]',\n 'fb_access_token': 'TOKEN',\n 'name': 'John',\n 'surname': 'Doe',\n 'language': 'es',\n }\n mocked_facebook.return_value = {\n 'id': '1401481816739108',\n 'email': '[email protected]'\n }\n resp = self.client.post(url, data=data)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('auth', resp.data)\n self.assertIn('token', 
resp.data['auth'])\n\n resp = self.client.post(url, data=data)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('auth', resp.data)\n self.assertIn('token', resp.data['auth'])", "def test_39_facebook_oauth_creation(self):\r\n fake_response = {\r\n u'access_token': u'access_token',\r\n u'token_type': u'Bearer',\r\n u'expires_in': 3600,\r\n u'id_token': u'token'}\r\n\r\n fake_user = {\r\n u'username': u'teleyinex',\r\n u'first_name': u'John',\r\n u'last_name': u'Doe',\r\n u'verified': True,\r\n u'name': u'John Doe',\r\n u'locale': u'en_US',\r\n u'gender': u'male',\r\n u'email': u'[email protected]',\r\n u'quotes': u'\"quote',\r\n u'link': u'http://www.facebook.com/johndoe',\r\n u'timezone': 1,\r\n u'updated_time': u'2011-11-11T12:33:52+0000',\r\n u'id': u'11111'}\r\n\r\n from pybossa.view import facebook\r\n response_user = facebook.manage_user(fake_response['access_token'],\r\n fake_user, None)\r\n\r\n user = db.session.query(User).get(1)\r\n\r\n assert user.email_addr == response_user.email_addr, response_user", "def test_facebook_login(self):\n response = self.client.get('/login/facebook/')\n self.assertEqual(response.status_code, 302)", "def test_fb(self, username, password):\n self.driver.get('https://www.facebook.com/')\n name = self.driver.find_element(By.XPATH, \"//input[@id='email']\")\n pass_word = self.driver.find_element(By.XPATH, \"//input[@id='pass']\")\n name.send_keys(username)\n pass_word.send_keys(password)", "def fb_id(self):\n social_auth = self.social_auth.latest('id')\n return social_auth.uid", "def test_40_facebook_oauth_creation(self):\r\n fake_response = {\r\n u'access_token': u'access_token',\r\n u'token_type': u'Bearer',\r\n u'expires_in': 3600,\r\n u'id_token': u'token'}\r\n\r\n fake_user = {\r\n u'username': u'teleyinex',\r\n u'first_name': u'John',\r\n u'last_name': u'Doe',\r\n u'verified': True,\r\n u'name': u'John Doe',\r\n u'locale': u'en_US',\r\n u'gender': u'male',\r\n u'email': u'[email protected]',\r\n u'quotes': u'\"quote',\r\n u'link': u'http://www.facebook.com/johndoe',\r\n u'timezone': 1,\r\n u'updated_time': u'2011-11-11T12:33:52+0000',\r\n u'id': u'11111'}\r\n\r\n self.register()\r\n self.signout()\r\n\r\n from pybossa.view import facebook\r\n response_user = facebook.manage_user(fake_response['access_token'],\r\n fake_user, None)\r\n\r\n assert response_user is None, response_user", "def facebook(self, facebook):\n\n self._facebook = facebook", "def test_get_by_id(self):\n with self.client:\n self.client.post('/users/login', data=dict(\n username=\"eschoppik\", password='secret'\n ), follow_redirects=True)\n self.assertTrue(current_user.id == 1)\n self.assertFalse(current_user.id == 20)", "def test_facebook_login(self, get_object):\n get_object.return_value = self.return_val\n self.test_client().post(self.facebook_url, self.facebook_token, format=\"json\")\n response = self.client.post(self.facebook_url,\n self.facebook_token, format=\"json\")\n self.assertEqual(response.status_code, 200)", "def test_prefectures_id_get(self):\n pass", "def test_user_id_put(self):\n pass", "def test_30_app_id_owner(self, mock):\r\n self.register()\r\n self.new_application()\r\n\r\n res = self.app.get('/app/sampleapp/settings', follow_redirects=True)\r\n assert \"Sample App\" in res.data, (\"Application should be shown to \"\r\n \"the owner\")\r\n msg = '<strong><i class=\"icon-cog\"></i> ID</strong>: 1'\r\n err_msg = \"Application ID should be shown to the owner\"\r\n assert msg in res.data, err_msg\r\n\r\n self.signout()\r\n with 
self.flask_app.app_context():\r\n self.create()\r\n self.signin(email=Fixtures.email_addr2, password=Fixtures.password)\r\n res = self.app.get('/app/sampleapp/settings', follow_redirects=True)\r\n assert res.status_code == 403, res.status_code", "async def test_unique_id(hass: HomeAssistant, start_ha) -> None:\n assert len(hass.states.async_all()) == 1", "def test_cyclingleagues_id_get(self):\n pass", "def search_username(ausername):\n print \"Searching: \" + ausername + \" -> \",\n url = \"http://www.findmyfbid.com/\"\n post_data = \"https://www.facebook.com/\" + ausername\n user_agent = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:42.0) Gecko/20100101 Firefox/42.0\"\n headers = {'User-Agent': user_agent}\n req = requests.post(url, headers=headers, data = { \"url\": post_data})\n html_data = req.text\n soup = BeautifulSoup(html_data, 'html.parser')\n resp = str(soup.code)\n ugly1 = resp.split(\">\")\n ugly2 = ugly1[1].split(\"<\")\n if resp == \"<code>https://www.facebook.com</code>\":\n print \"No ID found :(\"\n else:\n print ugly2[0]", "def test_open_id_setup(self):\r\n self.attempt_login(200)", "async def test_migrate_unique_id(hass: HomeAssistant, utcnow) -> None:\n entity_registry = er.async_get(hass)\n aid = get_next_aid()\n fan_entry = entity_registry.async_get_or_create(\n \"fan\",\n \"homekit_controller\",\n f\"homekit-00:00:00:00:00:00-{aid}-8\",\n )\n await setup_test_component(hass, create_fanv2_service_non_standard_rotation_range)\n\n assert (\n entity_registry.async_get(fan_entry.entity_id).unique_id\n == f\"00:00:00:00:00:00_{aid}_8\"\n )", "def test_getextid(self):\n crusoe = self.fixtures.crusoe\n email = crusoe.email.email\n service_facebook = 'facebook'\n\n externalid = models.UserExternalId( # NOQA: S106\n service=service_facebook,\n user=crusoe,\n userid=crusoe.email.email,\n username=crusoe.email.email,\n oauth_token=environ.get('FACEBOOK_OAUTH_TOKEN'),\n oauth_token_type='Bearer',\n )\n\n db.session.add(externalid)\n db.session.commit()\n result = models.getextid(service_facebook, userid=email)\n self.assertIsInstance(result, models.UserExternalId)\n assert '<UserExternalId {service}:{username} of {user}>'.format(\n service=service_facebook, username=email, user=repr(crusoe)[1:-1]\n ) in repr(result)", "def test_brains_id_get(self):\n pass", "async def test_light_unique_id(hass: HomeAssistant) -> None:\n already_migrated_config_entry = MockConfigEntry(\n domain=DOMAIN, data={}, unique_id=MAC_ADDRESS\n )\n already_migrated_config_entry.add_to_hass(hass)\n bulb = _mocked_bulb()\n bulb.color_temp = None\n with _patch_discovery(device=bulb), _patch_single_discovery(device=bulb):\n await async_setup_component(hass, tplink.DOMAIN, {tplink.DOMAIN: {}})\n await hass.async_block_till_done()\n\n entity_id = \"light.my_bulb\"\n entity_registry = er.async_get(hass)\n assert entity_registry.async_get(entity_id).unique_id == \"AABBCCDDEEFF\"", "def test_0001(init_driver):\n login_page = LoginPage(init_driver, base_url=\"https://www.facebook.com\").open()\n home_page = login_page.login(cfg.credentials('email'), cfg.credentials('password'))\n assert home_page.get_title() == Title.FACEBOOK_HOME", "def test_basketballteams_id_get(self):\n pass" ]
[ "0.79430753", "0.66861093", "0.62252456", "0.59206605", "0.5812997", "0.5798508", "0.57349694", "0.5734449", "0.5725819", "0.5719309", "0.5678224", "0.5661664", "0.5649968", "0.5649178", "0.564226", "0.5627347", "0.56215537", "0.5605089", "0.55495596", "0.54952425", "0.5486986", "0.54666656", "0.54611754", "0.543125", "0.5420368", "0.5390138", "0.5382956", "0.53815055", "0.538069", "0.53765726" ]
0.78903675
1
Test delete chef's facebook id
def test_delete_chef_facebook_id(self):
    url = '/0/facebook'

    self.user.fb_user_id = 'FB_ID'
    self.user.fb_access_token = 'TOKEN'
    self.user.save()

    resp = self.client.delete(url)
    self.assertPermissionDenied(resp)

    headers = self.login()
    resp = self.client.delete(url, **headers)
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp.data, {'response': {'return': True}})
    self.assertIsNone(Chefs.objects.get(pk=self.user.pk).fb_user_id)
    self.assertIsNone(Chefs.objects.get(pk=self.user.pk).fb_access_token)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_user_id_delete(self):\n pass", "def test_get_chef_facebook_id(self):\n url = '/0/facebook'\n\n self.user.fb_user_id = 'FB_ID'\n self.user.fb_access_token = 'TOKEN'\n self.user.save()\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.data, {'response': {'fb_user_id': 'FB_ID'}})", "def test_set_chef_facebook_id(self):\n url = '/0/facebook'\n\n data = {'fb_user_id': 'FB_ID', 'fb_access_token': 'TOKEN'}\n\n resp = self.client.post(url, data)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n\n # Without data\n resp = self.client.post(url, **headers)\n self.assertEqual(resp.status_code, 400)\n\n resp = self.client.post(url, data, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.data, {'response': {'return': True}})\n self.assertEqual(Chefs.objects.get(pk=self.user.pk).fb_user_id, 'FB_ID')\n self.assertEqual(Chefs.objects.get(pk=self.user.pk).fb_access_token, 'TOKEN')\n\n # Cannot reset while set\n resp = self.client.post(url, data, **headers)\n self.assertEqual(resp.status_code, 400)", "def test_meme_meme_id_delete(self):\n pass", "def test_delete_identity(self):\n pass", "def test_delete(client):\n rv = delete(client, 'Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'", "def test_delete_user_by_id_non_existent(client: FlaskClient) -> None:\n username = create_random_username()\n auth_token = create_auth_token(username)\n response = delete_user(client, username, auth_token.signed)\n assert_error_response(response, HTTPStatus.NOT_FOUND)", "def test_delete():\n sample_uuid = get_sample_id()\n response = requests.delete(f'http://localhost:5000/api/persons/{sample_uuid}')\n\n assert response.status_code == 200", "def test_delete(self):\n pass", "def test_workflows_id_delete(self):\n pass", "def test_delete_user(self):\n\n with self.client:\n result = self.client.post('/users/cool-guy-johnny-B/delete',\n follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertNotIn(b'cool-guy-johnny-B', result.data)", "def test_delete7(self):\n pass", "def test_delete_user_by_id_mismatch(client: FlaskClient) -> None:\n username = create_random_username()\n other_username = create_random_username()\n auth_token = create_auth_token(other_username)\n response = delete_user(client, username, auth_token.signed)\n assert_error_response(response, HTTPStatus.FORBIDDEN)", "def test_delete_user_by_id_non_admin(client: FlaskClient, db_session) -> None:\n username = create_random_username()\n populate_database_with_users(db_session, username)\n auth_token = create_auth_token(username)\n response = delete_user(client, username, auth_token.signed)\n assert response.status_code == HTTPStatus.NO_CONTENT\n assert response.content_length is None\n assert GifSyncUser.get_by_username(username) is None", "def test_delete_remember_me(self):\n pass", "def test_delete_flashcard(self):\n values = (\n \"\"\"\n 'password', '[email protected]', 'false', 'false',\n NULL, NULL, now(), NULL\n \"\"\"\n )\n insert_user_row(values)\n values = (\n \"\"\"\n 'Computer Science', 'Computer Science Flashcard', 1\n \"\"\"\n )\n insert_flashcard_row(values)\n select_flashcard_command = (\n \"\"\"\n SELECT * FROM flashcards\n WHERE id = 1;\n \"\"\"\n )\n before_delete_data = ExecuteCommandFetchData().execute_command(\n select_flashcard_command)\n 
self.assertEqual(before_delete_data[0][1], 'Computer Science')\n delete_flashcard_row('1')\n after_delete_data = ExecuteCommandFetchData().execute_command(\n select_flashcard_command)\n self.assertEqual(after_delete_data, [])", "def test_delete_run(self):\n pass", "def delete():", "def test_delete_user(self):\n pass", "def test_delete_user(self):\n pass", "def test_delete_item_incorrect_id(test_client):\n\n response = test_client.delete(GOOD_ITEM_URL)\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND", "def test_delete_device_by_id(self):\n pass", "def test_not_integrated(self):\n self.fbuser.delete()\n response = self._get(get_kwargs=self._data())\n self._check_response(response, 102)\n self.assertEqual(UserFitbit.objects.count(), 0)", "def test_delete_card(self):\n CardFactory()\n self.session.commit()\n resp = self.app.get('cards/1')\n\n assert resp.status_code == 200\n\n resp = self.app.delete('cards/1')\n\n assert resp.status_code == 200\n\n resp = self.app.get('cards/1')\n\n assert resp.status_code == 404", "def test_duo_account_delete(self):\n pass", "def delete(self, _id):", "def test_delete_login():\n # Pick some arbitrary UUID.\n uuid = \"b58cba44-da39-11e5-9342-56f85ff10656\"\n assert_redirect_to_login('/delete/{}/'.format(uuid))\n\n # We would get redirected to '/', since the UUID above is junk\n # assert_not_redirect_to_login('/delete/{}/'.format(uuid))", "def test_delete_device_by_id1(self):\n pass", "def test_delete_profile(self):\n self.cim.delete_profile(customer_profile_id=u\"123\")", "def test_delete_inexistent_activity(self):\n username = 'messi'\n self.create_user(username)\n self.testapp.delete('/activities/%s' % 'fakeid', '', oauth2Header(username), status=404)" ]
[ "0.6959481", "0.6900694", "0.68606555", "0.68168575", "0.6722174", "0.6570718", "0.6499945", "0.64777905", "0.64528614", "0.64465207", "0.6391612", "0.6390442", "0.6386338", "0.63839215", "0.6376821", "0.6372202", "0.6361435", "0.6347037", "0.6328622", "0.6328622", "0.63098484", "0.6305417", "0.62824076", "0.6268476", "0.6256399", "0.62416285", "0.6234257", "0.6230179", "0.6227713", "0.62264234" ]
0.87928045
0
Test get chef's facebook friends to follow
def test_get_chef_facebook_friends_to_follow(self, mocked_facebook):
    url = '/0/facebook/friends'

    mocked_facebook.return_value = {
        'data': [
            {
                'name': 'Friend1',
                'id': 'FB_ID1'
            },
            {
                'name': 'Friend2',
                'id': 'FB_ID2'
            },
            {
                'name': 'Friend3',
                'id': 'FB_ID3'
            }
        ]
    }

    self.user.fb_user_id = 'FB_ID'
    self.user.fb_access_token = 'TOKEN'
    self.user.save()

    chef1 = self.create_user('1')
    chef1.fb_user_id = 'FB_ID1'
    chef1.save()

    self.user.follow(chef1)

    chef2 = self.create_user('2')
    chef2.fb_user_id = 'FB_ID2'
    chef2.save()

    chef3 = self.create_user('3')
    chef3.fb_user_id = 'FB_NON_FRIEND'
    chef3.save()

    resp = self.client.get(url)
    self.assertPermissionDenied(resp)

    headers = self.login()
    resp = self.client.get(url, **headers)
    self.assertEqual(resp.status_code, 200)
    self.assertIn('friends', resp.data)
    self.assertEqual(1, len(resp.data['friends']))
    self.assertEqual(chef2.email, resp.data['friends'][0]['email'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_followers(self):\n pass", "def follows_target_check(twitter,top_followers_list):\n yes_follow_list = []\n not_follow_list = []\n following_dict = {}\n target = 'HillaryClinton'\n \n for user in top_followers_list:\n params = {'source_id':user, 'target_screen_name':target}\n response = twitter.request('friendships/show', params)\n data = response.json()\n #print(\"DATAAA::\",data)\n if response.status_code == 200:\n #print(\"IN BIGG IFFFFF:::\")\n following_dict = data['relationship']['source']\n #print(\"following_dict::\",following_dict)\n check = following_dict['following']\n #print(\"check::\",check)\n if check:\n #print(\"IN IFFFFF:::\")\n yes_follow_list.append(user)\n \n else:\n #print(\"IN ELSEEEE:::\")\n not_follow_list.append(user)\n \n else:\n print('Got error %s \\nsleeping for 15 minutes.' % response.text)\n sys.stderr.flush()\n time.sleep(61 * 15)\n \n print(\"YES_LIST:::\",yes_follow_list) \n print(\"NO_LIST:::\",not_follow_list) \n return not_follow_list", "def test_view_those_you_follow(self):\n response = self.client.get(\n reverse('following')\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_message_followers(self):\n # Setup\n # 'unitfriend' should have 2 followers: 'unittest' and 'message_test'(that unittest is a member of)\n self.log_in_as('unittest')\n self.assertIn('unitfriend', [following['username'] for following in self.get_member()['following']['items']]) # Check unittest is a follower of unitfriend\n self.create_group('message_test')\n self.set_persona('message_test')\n self.follow('unitfriend')\n \n self.log_in_as('unitfriend')\n num_notifications = self.getNumNotificationsInDB()\n num_emails = getNumEmails()\n num_unitfriend_followers = self.get_member('unitfriend')['member']['num_followers'] # NOTE: all followers MUST be users appart from one group (added above) or this test will fail\n \n self.boom_content(1) # Boom API doc guaranteed to be content 1\n \n self.assertEquals(self.getNumNotificationsInDB(), num_notifications + num_unitfriend_followers )\n self.assertEquals( getNumEmails() , num_emails + num_unitfriend_followers - 1)\n \n self.log_in_as('unittest')\n self.set_persona('message_test')\n self.delete_group('message_test')", "def test_get_chef_facebook_id(self):\n url = '/0/facebook'\n\n self.user.fb_user_id = 'FB_ID'\n self.user.fb_access_token = 'TOKEN'\n self.user.save()\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.data, {'response': {'fb_user_id': 'FB_ID'}})", "def test_followers(self):\n self.resource._request.register_uri(\n 'GET', '/users/dotzero/followers?page=2', 'fixture_post.json')\n\n response = self.resource.followers('dotzero', 2)\n\n self.assertTrue('data' in response)\n self.assertTrue('server_time' in response)", "def test_followers(self):\n\n follow1 = Follows(\n user_being_followed_id = self.testuser.id,\n user_following_id = self.testuser2.id\n )\n follow2 = Follows(\n user_being_followed_id = self.testuser3.id,\n user_following_id = self.testuser.id\n )\n\n db.session.add_all((follow1, follow2))\n db.session.commit()\n\n with self.client as c:\n with c.session_transaction() as session:\n session[CURR_USER_KEY] = self.testuser.id\n \n response = c.get(f\"/users/{self.testuser.id}/followers\")\n data = str(response.data)\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"@bob\", data)\n self.assertNotIn(\"@carl\", 
data)\n self.assertNotIn(\"@alvin\", data)", "def test_view_followers(self):\n response = self.client.get(\n reverse('followers')\n )\n self.assertIn(b'followers', response.content)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def follow_someone(screen_name):\n twitter.create_friendship(screen_name=screen_name)", "async def follow(follow):\n await follow.edit(\n f\"`FOLLOW {DEFAULTUSER} ON` \\n\\n\"\n f\"[InstaGram](https://www.instagram.com/mayur_karaniya) \\n\\n\"\n f\"[FaceBook](https://www.facebook.com/mkaraniya) \\n\\n\"\n f\"[YouTube](https://www.youtube.com/channel/UCeKQxQK7XZ3jGi3541uWATg?sub_confirmation=1) \"\n )", "def follow(self, follower, followee):\n pass", "def test_get_list_of_followers_with_auth(self):\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.authorize_user(self.user)\n response = self.client.get(self.followers_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_followers_following_list_authorized(self):\n\n # user2 following user1\n # follow = Follows(user_being_followed_id=1, user_following_id=2)\n\n self.u2.following.append(self.u)\n db.session.commit()\n\n with self.client as client:\n\n client.post(\n '/login',\n data = {\n \"username\" : self.u.username,\n \"password\" : \"password\"\n },\n )\n\n response = client.get(\"/users/2/following\")\n html = response.get_data(as_text=True)\n\n self.assertEqual(response.status_code, 200)\n self.assertIn('\"/users/1\"' ,html)\n \n response = client.get(\"/users/1/followers\")\n html = response.get_data(as_text=True)\n\n self.assertEqual(response.status_code, 200)\n self.assertIn('\"/users/2\"' ,html)", "def test_following(self):\n\n follow1 = Follows(\n user_being_followed_id = self.testuser2.id,\n user_following_id = self.testuser.id\n )\n follow2 = Follows(\n user_being_followed_id = self.testuser3.id,\n user_following_id = self.testuser.id\n )\n\n db.session.add_all((follow1, follow2))\n db.session.commit()\n\n with self.client as c:\n with c.session_transaction() as session:\n session[CURR_USER_KEY] = self.testuser.id\n \n response = c.get(f\"/users/{self.testuser.id}/following\")\n data = str(response.data)\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"@bob\", data)\n self.assertIn(\"@carl\", data)\n self.assertNotIn(\"@alvin\", data)", "def test_follow(self):\n activity = {\n \"@context\": \"https://www.w3.org/ns/activitystreams\",\n \"id\": \"https://example.com/users/rat/follows/123\",\n \"type\": \"Follow\",\n \"actor\": \"https://example.com/users/rat\",\n \"object\": \"https://example.com/user/mouse\",\n }\n\n self.assertFalse(models.UserFollowRequest.objects.exists())\n with patch(\n \"bookwyrm.models.activitypub_mixin.broadcast_task.apply_async\"\n ) as mock:\n views.inbox.activity_task(activity)\n self.assertEqual(mock.call_count, 1)\n response_activity = json.loads(mock.call_args[1][\"args\"][1])\n self.assertEqual(response_activity[\"type\"], \"Accept\")\n\n # notification created\n notification = models.Notification.objects.get()\n self.assertEqual(notification.user, self.local_user)\n self.assertEqual(notification.notification_type, \"FOLLOW\")\n\n # the request should have been deleted\n self.assertFalse(models.UserFollowRequest.objects.exists())\n\n # the follow relationship should exist\n follow = models.UserFollows.objects.get(user_object=self.local_user)\n self.assertEqual(follow.user_subject, self.remote_user)", "def test_get_list_of_following_users_with_auth(self):\n with 
self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.authorize_user(self.user)\n self.register_user(self.user1)\n self.client.post(self.follow_url, format='json')\n response = self.client.get(self.following_list_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_follow_accept(self, *_):\n with patch(\"bookwyrm.models.activitypub_mixin.broadcast_task.apply_async\"):\n rel = models.UserFollowRequest.objects.create(\n user_subject=self.local_user, user_object=self.remote_user\n )\n activity = {\n \"@context\": \"https://www.w3.org/ns/activitystreams\",\n \"id\": \"https://example.com/users/rat/follows/123#accepts\",\n \"type\": \"Accept\",\n \"actor\": \"https://example.com/users/rat\",\n \"object\": {\n \"id\": rel.remote_id,\n \"type\": \"Follow\",\n \"actor\": \"https://example.com/user/mouse\",\n \"object\": \"https://example.com/users/rat\",\n },\n }\n\n self.assertEqual(models.UserFollowRequest.objects.count(), 1)\n\n views.inbox.activity_task(activity)\n\n # request should be deleted\n self.assertEqual(models.UserFollowRequest.objects.count(), 0)\n\n # relationship should be created\n follows = self.remote_user.followers\n self.assertEqual(follows.count(), 1)\n self.assertEqual(follows.first(), self.local_user)", "def follow(user, people):\n api = get_api(user)\n current_screen_name = api.VerifyCredentials().GetScreenName()\n\n # don't let a user follow themselves\n screen_names = [person.twitter_screen_name for person in people]\n if current_screen_name in screen_names: screen_names.remove(current_screen_name)\n\n followed = []\n not_followed = []\n\n for screen_name in screen_names:\n try:\n api.CreateFriendship(screen_name=screen_name)\n followed.append(screen_name)\n except twitter.TwitterError:\n not_followed.append(screen_name)\n\n return 'followed %s people' % len(followed)", "def test_person_is_following(self):\n person = Person.objects.create(\n username='tom', email='[email protected]', password='fake_password'\n )\n self.assertFalse(person.is_following(1))\n show = FollowedShows.objects.create(\n user=person,\n show_name='show1',\n show_id=1,\n air_days='monday, tuesday',\n air_time='10:00',\n summary='summary here',\n network='network here'\n )\n self.assertTrue(person.is_following(show.show_id))", "def find_new_followers(self):\n api = self.api\n geocode = self.geocode\n queries = self.queries\n hits_per_query = self.hits_per_query\n\n self.log.debug(\"Initialize\")\n self.log.debug(\"[ ********* FIND NEW FOLLOWERS *********** ]\")\n\n if self.strategy == UserProfile.FOLLOW or self.strategy == UserProfile.TWEET:\n # Find statuses that match our interests\n self.log.debug(\"Strategy set to FOLLOW or TWEET\")\n n = hits_per_query\n search_dict = dict()\n search_dict['lang'] = \"en\"\n if not geocode is None:\n search_dict['geocode'] = geocode\n statuses = list()\n self.log.debug(\"Queries:\")\n for q in queries:\n search_dict['q'] = q\n results = [c for c in Cursor(api.search, **search_dict).items(n)]\n self.log.debug(\" => %s: %s hits\" % (q, len(results)))\n statuses.extend(results)\n #self.log.debug(\"Statuses: %s\" % \"\\n\".join([str(s.__dict__) for s in statuses]))\n # Get all the screen names of senders and receivers\n screen_names = ([t.from_user for t in statuses] +\n [t.to_user for t in statuses if t.to_user])\n\n # Convert the strings to Tweepy user objects\n users, remainder = lookup_users_by_screen_name(self.api, screen_names)\n\n elif self.strategy == UserProfile.STEAL:\n users = []\n stolen_from = 
{}\n for competitor in list(self.competitors):\n self.log.debug(\"[ ********* STEAL %s *********** ]\" % competitor)\n try:\n competitor_friends_ids = self.api.friends_ids(competitor)\n competitor_followers_ids = self.api.followers_ids(competitor)\n\n filter_known_users_to_reduce_api_hits = False\n\n if filter_known_users_to_reduce_api_hits is True:\n new_competitor_friends_ids = [id for id in competitor_friends_ids if not len(TwitterAccount.objects.filter(twitter_id=id)) > 0 ]\n old_competitor_friends_ids = [id for id in competitor_friends_ids if len(TwitterAccount.objects.filter(twitter_id=id)) > 0 ]\n new_competitor_followers_ids = [id for id in competitor_followers_ids if not len(TwitterAccount.objects.filter(twitter_id=id)) > 0 ]\n old_competitor_followers_ids = [id for id in competitor_followers_ids if len(TwitterAccount.objects.filter(twitter_id=id)) > 0 ]\n # print new_competitor_friends_ids\n # print old_competitor_friends_ids\n # print new_competitor_followers_ids\n # print old_competitor_followers_ids\n\n print \"start lookups\"\n new_competitor_friends, remaining_friends = utils.lookup_users_by_id(self.api, new_competitor_friends_ids)\n new_competitor_followers, remaining_followers = utils.lookup_users_by_id(self.api, new_competitor_followers_ids)\n print \"end lookups\"\n else:\n # get all the tweepy users\n print \"start lookups\"\n new_competitor_friends, remaining_friends = utils.lookup_users_by_id(self.api, competitor_friends_ids)\n new_competitor_followers, remaining_followers = utils.lookup_users_by_id(self.api, competitor_followers_ids)\n print \"end lookups\"\n\n print \"%s has %s friends\" % (competitor, len(new_competitor_friends))\n print \"%s has %s followers\" % (competitor, len(new_competitor_followers))\n\n # holy crap this is so fucked up i'm ashamed that this code is getting written like this!\n\n for u in new_competitor_friends + new_competitor_followers:\n stolen_from.update({u.screen_name.lower(): competitor})\n\n except Exception, e:\n print e\n # didn't get all the users, don't remove the competitor\n # from the competitor list\n pass\n else:\n # got all the competitors friends and followers and converted them\n # to tweepy users.\n users += new_competitor_friends\n users += new_competitor_followers\n # add them to the users list to be processed in the next block (for user in users)\n # then pop the name off the competitors list in the UserProfile\n # fuck it for now i'm going to just cycle the item to the bottom of the competitor list so we can start getting maximal coverage within api constraints by making sure the top person is new every time\n self.competitors.append(self.competitors.pop(0))\n # return # for now\n self.profile.competitors = \"\\r\\n\".join(self.competitors)\n self.profile.save()\n\n # use the profile competitors list\n # for each name in competitors list\n # add all friends\n\n # should filter out garbage users. 
something like:\n users = [u for u in users if not Target.objects.filter(hunter=self.user, hunted__screen_name=u.screen_name.lower())]\n\n for user in users:\n twitter_account, created = utils.get_or_create_twitter_account(user)\n target, created = Target.objects.get_or_create(\n hunter=self.user, hunted=twitter_account)\n print target.hunted.screen_name, created\n if created:\n try:\n screen_name = user.screen_name.lower()\n match = lambda x: screen_name in \\\n (x.from_user.lower(), x.to_user and x.to_user.lower())\n if not self.strategy == UserProfile.STEAL:\n trigger_tweet = filter(match, statuses)[0].text\n else:\n try:\n trigger_tweet = \"Steal from user: %s\" % stolen_from.get(screen_name.lower(), \"someone. i lost it. sorry.\")\n except Exception, e:\n print \"YUCK. ERRORS.\"\n print \"YUCK. ERRORS.\"\n print e\n print e\n except Exception, e:\n self.log.exception(\"Could not get trigger tweet for %s\" %\n user.screen_name.lower())\n trigger_tweet = \"Error: Couldn't retrieve tweet.\"\n self.log.debug(\"Saved twitter account %s (trigger: %r)\" %\n (twitter_account.screen_name,\n trigger_tweet[:50]))\n target.reason = trigger_tweet\n target.status = Target.ON_DECK\n target.save()\n else:\n pass\n # print \" - Previously followed this dudesicle: %s\" % user.screen_name", "def auto_follow_followers():\n\n following = set(t.friends.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n followers = set(t.followers.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n\n not_following_back = followers - following\n\n for user_id in not_following_back:\n try:\n t.friendships.create(user_id=user_id, follow=False)\n except Exception as e:\n print(\"error: %s\" % (str(e)))", "def get_user_friends(acct, KEY, SECRET): # this isn't true - evaluate what needs to be returned tomorrow.\n\n new_gr_session = OAuth1Session(\n consumer_key=KEY,\n consumer_secret=SECRET,\n access_token=acct.access_token,\n access_token_secret=acct.access_token_secret\n )\n\n user_id = str(acct.user.gr_id)\n current_page = 1\n\n total, friends = get_friends_page(new_gr_session, user_id, current_page)\n\n # check for no friends first\n if len(friends) == 0:\n flash(\"No Goodreads friends found.\")\n print \"No friends!\"\n\n # friends requests return a list of 30 at a time\n # get total number of pages required.\n total_pages = int(math.ceil(total / float(30)))\n # creates new users and adds friendship relationships to db\n add_user_friendships(friends, acct)\n\n # check for more than 30 friends\n if total_pages > 1:\n\n current_page = 2\n while current_page <= total_pages:\n\n print \"******YOU HAVE MORE FRIENDS*******\"\n\n # wait 1 second between calls, per GR policy\n time.sleep(1.00)\n\n # create new query with updated current_page\n total, friends = get_friends_page(new_gr_session, user_id, current_page)\n add_user_friendships(friends, acct)\n current_page += 1\n\n return None", "def follow_user(cls, user, following):\r\n pass", "def test_request_friend(self):\n self.test_login_user()\n self.test_create_user('b')\n url = reverse('MGA:send_friend_request')\n data = {'id': 2}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def follow_friend():\n print \"followuser\"\n username = request.args.get('username')\n print \"JSON Data\", username\n # username= req_data[username]\n whom_id = get_user_id(username)\n print \"whom_id:\", whom_id\n if whom_id is None:\n abort(404)\n follow_query(whom_id)\n flash('You are now following \"%s\"' % username)\n name = {'name of 
following user': username}\n R_SERVER.delete(user_timeline_key)\n return jsonify(Username=name, Status_code=status.HTTP_200_OK)", "def test_remove_followers(self):\n pass", "def test_followed(self):\n self.resource._request.register_uri(\n 'GET', '/users/dotzero/followed?page=2', 'fixture_post.json')\n\n response = self.resource.followed('dotzero', 2)\n\n self.assertTrue('data' in response)\n self.assertTrue('server_time' in response)", "def userFollowers(nick):\n if (len(nick) != 1):\n print \"Has d'introduir només un nick\"\n return\n i.userFollow(nick[0])", "def test_how_many_friends(self):\n expected = [\n (1, 3), (2, 3), (3, 3), (5, 3), (8, 3),\n (0, 2), (4, 2), (6, 2), (7, 2), (9, 1),\n ]\n self.assertEqual(expected, self.users.how_many_friends())", "def test_followers_following_list_unauthorized(self):\n\n self.u2.following.append(self.u)\n db.session.commit()\n\n with self.client as client:\n response = client.get(\"/users/2/following\")\n\n self.assertEqual(response.location, \"http://localhost/\")\n self.assertIn('Access unauthorized.', get_flashed_messages())\n\n response2 = client.get(\"/users/2/followers\")\n\n self.assertEqual(response2.location, \"http://localhost/\")\n self.assertIn('Access unauthorized.', get_flashed_messages())" ]
[ "0.6975192", "0.664485", "0.66437894", "0.66080886", "0.6545551", "0.6520821", "0.6515515", "0.64823484", "0.64600056", "0.64239573", "0.6390204", "0.6386054", "0.63784146", "0.63372743", "0.63342613", "0.6311041", "0.6188004", "0.61508006", "0.612374", "0.6114839", "0.6103889", "0.6088376", "0.60859126", "0.6080301", "0.6079838", "0.6061555", "0.60504824", "0.6043737", "0.6010836", "0.6007588" ]
0.8386643
0
rotates the complete matrix according to x,y,z
def rotatematrix(m, x, y ,z): for i in xrange(x): m = rotatem_x(m) for i in xrange(y): m = rotatem_y(m) for i in xrange(z): m = rotatem_z(m) return m
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotate(self, x=0, y=0, z=0):\n\t\tquaternion = R.from_euler('xyz', [x, y, z], degrees=True)\n\t\trotation_matrix = np.array(quaternion.as_matrix())\n\t\trotation_matrix = np.pad(rotation_matrix, [(0, 1), (0, 1)], mode='constant')\n\t\trotation_matrix[3,3] = 1\n\n\t\tself.matrix = np.matmul(self.matrix, rotation_matrix)", "def rotateZ(self, *args, **kwargs):\n ...", "def rotate_matrix(self, mat):\r\n N=3\r\n for x in range(0, int(N / 2)):\r\n for y in range(x, N-x-1):\r\n temp = mat[x][y]\r\n mat[x][y] = mat[y][N-1-x]\r\n mat[y][N-1-x] = mat[N-1-x][N-1-y]\r\n mat[N-1-x][N-1-y] = mat[N-1-y][x]\r\n mat[N-1-y][x] = temp\r\n return mat", "def rotate(self, matrix: list[list[int]]) -> None:", "def RotationMatrix(theta, x, y, z, point=None):\n\treturn mach.rotation_matrix(theta, [x, y, z])", "def mrotate(self):\n result_matrix = [[0 for col in range(len(self.matrix[0]))] for row in range(len(self.matrix))]\n for i in range(len(self.matrix)):\n for j in range(len(self.matrix[0])):\n result_matrix[i][j] = self.matrix[i][len(self.matrix[0]) - 1 - j]\n # left turn -> result_matrix[i][j] = self.matrix[len(self.matrix) - 1 - i][j]\n self.matrix = result_matrix\n pass", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def rotate3(x, angle_x=0, angle_y=0, angle_z=0, origin=(0, 0, 0)):\n origin = np.asarray(origin)\n x = np.asarray(x) - origin\n r = rotation_matrix3(angle_x, angle_y, angle_z)\n return x.dot(r.T) + origin", "def quick_rot(line):\n\treturn zip(*reversed(create_matrix(line)))", "def matrix_rotate_3d_z(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_z = -deg * pi/180\n c_z = cos(rad_z)\n s_z = sin(rad_z)\n return np.matrix([[c_z, -s_z, 0], [s_z, c_z, 0], [0, 0, 1]])", "def _rotate(self, affine):\n dims = affine.shape[0]\n if not np.isscalar(self.rotation):\n raise Exception('this class requires exactly one entry for rotation!')\n theta = (self.deformrandomstate.rand() - 0.5) * 2 * self.rotation\n if dims == 4:\n\n # sample unit vector:\n u = np.random.random(3)\n u /= np.sqrt(np.sum([uu ** 2 for uu in u]) + 1e-8)\n ct = np.cos(theta)\n st = np.sin(theta)\n rot = np.eye(4)\n rot[:3, :3] = [\n [ct + u[0] ** 2 * (1 - ct), u[0] * u[1] * (1 - ct) - u[2] * st, u[0] * u[2] * (1 - ct) + u[2] * st],\n [u[1] * u[0] * (1 - ct) + u[2] * st, ct + u[1] ** 2 * (1 - ct), u[1] * u[2] * (1 - ct) - u[0] * st],\n [u[2] * u[0] * (1 - ct) - u[1] * st, u[2] * u[1] * (1 - ct) + u[0] * st, ct + u[2] ** 2 * (1 - ct)]]\n\n elif dims == 3:\n rot = np.eye(3)\n rot[:2, :2] = np.asarray([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]])\n else:\n raise Exception(\n 'implement this for each dimension, since not yet implemented for dimension {}'.format(dims))\n\n return np.matmul(rot, affine)", "def rotMatrix( source = None ):\n if source is None:\n return None,None\n else:\n (x,y,z, a) = source\n if a % TWOPI:\n return tmatrixaccel.rotMatrix( x,y,z,a ),tmatrixaccel.rotMatrix( x,y,z,-a )\n return None,None", "def matrix_rotate_3d_x(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_x = -deg * pi/180\n c_x = cos(rad_x)\n s_x = sin(rad_x)\n return np.matrix([[1, 0, 0], [0, c_x, -s_x], [0, s_x, c_x]])", "def rotate(self, matrix):\n n = len(matrix)\n \n for circle in range(n//2):\n r_circle = n - circle - 1\n for i in range(circle, n - circle - 1):\n a = matrix[circle][i]\n b, matrix[i][r_circle] = 
matrix[i][r_circle], a\n c, matrix[r_circle][n - i - 1] = matrix[r_circle][n - i - 1], b\n d, matrix[n - i - 1][circle] = matrix[n - i - 1][circle], c\n matrix[circle][i] = d", "def rotate_3D(atom, source_atom):\n from lauescript.cryst.match import get_transform\n\n lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]]\n lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]\n\n matrix = get_transform(lst1, lst2, matrix=True)\n\n adp = source_atom.adp['cart_int']\n\n atom.adp['cart_int'] = rotate_adp(adp, matrix)", "def rotate(self, matrix: List[List[int]]) -> None:\n r = c = len(matrix)\n m = 0\n n = r - 1\n\n while m < n:\n i = m\n for j in range(m, n):\n # print(i, j)\n # print(j, n)\n # print(n, c - j - 1)\n # print(c - j - 1, m)\n temp1 = matrix[j][n]\n matrix[j][n] = matrix[i][j]\n\n temp2 = matrix[n][c - j - 1]\n matrix[n][c - j - 1] = temp1\n\n temp3 = matrix[c - j - 1][m]\n matrix[c - j - 1][m] = temp2\n\n matrix[i][j] = temp3\n # print(matrix)\n m += 1\n n -= 1", "def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. - tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. - txx - tyy\n\n return rot", "def rotation_matrix(self,rot_mat,center=True,**kwargs):\n xyz = self.get('x,y,z',**kwargs)\n\n if center:\n xyz0 = np.mean(xyz)\n xyz = np.dot(rot_mat,(xyz-xyz0).T).T + xyz0\n else:\n xyz = np.dot(rot_mat,(xyz).T).T\n self.update('x,y,z',xyz,**kwargs)", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for l in range(n // 2):\n r = n - 1 - l\n for p in range(l, r):\n q = n - 1 - p\n cache = matrix[l][p]\n matrix[l][p] = matrix[q][l]\n matrix[q][l] = matrix[r][q]\n matrix[r][q] = matrix[p][r]\n matrix[p][r] = cache", "def rotate(self, matrix):\n n = len(matrix)\n #转置\n for i in range(n):\n for j in range(i+1,n):\n matrix[i][j],matrix[j][i] = matrix[j][i],matrix[i][j]\n #镜像\n mid = n//2\n for i in range(n):\n for j in range(mid):\n matrix[i][j],matrix[i][n-j-1] = matrix[i][n-j-1],matrix[i][j]", "def rotated_e():\n x = np.zeros((5, 5))\n x[:, 0] = 1.\n y = np.zeros((5, 5))\n y[:, 2] = 1.\n z = np.zeros((5, 5))\n z[:, 4] = 1.\n a = np.zeros((5, 5))\n a[0, :] = 1.\n b = np.zeros((5, 5))\n b[2, :] = 1.\n c = np.zeros((5, 5))\n c[4, :] = 1.\n\n img = np.zeros((4, 5, 5))\n img[0] = x + y + z + a\n img[1] = x + y + z + c\n img[2] = a + b + c + x\n img[3] = a + b + c + z\n img[img > 0] = 1.\n\n return img.astype('float32')", "def rotate_z(self, angle):\n angle *= np.pi / 180\n return self.transform(np.matrix([[np.cos(angle), -np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1]]))", "def rotateCCW(matrix, rtn=False):\n n = len(matrix)\n\n for x in range(n):\n for y in range(n-1, x-1, -1):\n matrix[x][y], matrix[y][x] = matrix[y][x], matrix[x][y]\n\n matrix.reverse()\n\n if rtn:\n return matrix", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for i in range(n // 2):\n for t in range(n - 1 - 2 * i):\n temp = [matrix[i][-i-1-t], matrix[i+t][i], matrix[-i-1][i+t], matrix[-i-1-t][-i-1]]\n 
matrix[i][-i-1-t] = temp[1]\n matrix[i+t][i] = temp[2]\n matrix[-i-1][i+t] = temp[3]\n matrix[-i-1-t][-i-1] = temp[0]", "def rotate(self, matrix: List[List[int]]) -> None:\n for r in range(len(matrix)):\n for c in range(r):\n matrix[r][c], matrix[c][r] = matrix[c][r], matrix[r][c]\n for row in matrix:\n row.reverse()", "def rotate(self, matrix: List[List[int]]) -> None:\r\n n = len(matrix)\r\n for j in range((n+1)//2):\r\n for i in range(n-2*j-1):\r\n matrix[j][j+i], matrix[j+i][n-1-j], matrix[n-1-j][n-1-j-i], matrix[n-1-j-i][j] = matrix[n-1-j-i][j], matrix[j][j+i], matrix[j+i][n-1-j], matrix[n-1-j][n-1-j-i]", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for i in range(n - 1):\n for j in range(n - 1 - i):\n matrix[i][j], matrix[n-1-j][n-1-i] = matrix[n-1-j][n-1-i], matrix[i][j]\n for i in range(n):\n for j in range(n // 2):\n matrix[j][i], matrix[n-1-j][i] = matrix[n-1-j][i], matrix[j][i]", "def rotate(self, matrix: List[List[int]]) -> None:\n flip(transpose(matrix))", "def rotate(X):\n return X" ]
[ "0.7443821", "0.72906315", "0.70417315", "0.70232594", "0.69875175", "0.69020265", "0.6857698", "0.68385106", "0.67997444", "0.677776", "0.67739475", "0.67630064", "0.6741109", "0.66981894", "0.6683466", "0.66559166", "0.66322666", "0.6608254", "0.6603616", "0.6599171", "0.6597836", "0.65562934", "0.6547541", "0.6545991", "0.6542392", "0.65386915", "0.653431", "0.6517447", "0.6514264", "0.651038" ]
0.7946977
0
Checks if the organism is eligible for a QN job and if so dispatches it. An organism is eligible for a QN job if it has more than MIN samples on a single platform.
def dispatch_qn_job_if_eligible(organism: Organism) -> None: samples = Sample.processed_objects.filter( organism=organism, has_raw=True, technology="MICROARRAY", is_processed=True, platform_name__contains="Affymetrix", ) if samples.count() < MIN: logger.info( "Total proccessed samples don't meet minimum threshhold", organism=organism, count=samples.count(), min=MIN, ) return platform_counts = ( samples.values("platform_accession_code") .annotate(dcount=Count("platform_accession_code")) .order_by("-dcount") ) biggest_platform = platform_counts[0]["platform_accession_code"] sample_codes_results = Sample.processed_objects.filter( platform_accession_code=biggest_platform, has_raw=True, technology="MICROARRAY", organism=organism, is_processed=True, ).values("accession_code") if sample_codes_results.count() < MIN: logger.info( "Number of processed samples for largest platform didn't mean threshold.", organism=organism, platform_accession_code=biggest_platform, count=sample_codes_results.count(), min=MIN, ) return sample_codes = [res["accession_code"] for res in sample_codes_results] dataset = Dataset() dataset.data = {organism.name + "_(" + biggest_platform + ")": sample_codes} dataset.aggregate_by = "ALL" dataset.scale_by = "NONE" dataset.quantile_normalize = False dataset.save() job = ProcessorJob() job.pipeline_applied = "QN_REFERENCE" job.save() pjda = ProcessorJobDatasetAssociation() pjda.processor_job = job pjda.dataset = dataset pjda.save() logger.info("Sending QN_REFERENCE for Organism", job_id=str(job.pk), organism=str(organism)) send_job(ProcessorPipeline.QN_REFERENCE, job) return job
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_fit_more(self):\n\n return len(self._requeue_jobs) < MAX_NUM", "def can_run_experiment(self, info, device):\n nb_qubit_max = self.backends[device]['nq']\n nb_qubit_needed = info['nq']\n return nb_qubit_needed <= nb_qubit_max, nb_qubit_max, nb_qubit_needed", "def handle(self, *args, **options):\n\n if options[\"organisms\"]:\n organism_names = options[\"organisms\"].split(\",\")\n organisms = Organism.objects.filter(name__in=organism_names)\n else:\n organisms = Organism.objects.all()\n\n for organism in organisms:\n dispatch_qn_job_if_eligible(organism)", "async def handle(self, iteration):\n if not (self.ai.floating_buildings_bm and self.ai.supply_used >= 199):\n for queen in self.queens.idle:\n if self.enemies.closer_than(10, queen.position):\n self.ai.add_action(queen.attack(self.enemies.closest_to(queen.position)))\n continue\n selected = self.hatchery.closest_to(queen.position)\n if queen.energy >= 25 and not selected.has_buff(QUEENSPAWNLARVATIMER):\n self.ai.add_action(queen(EFFECT_INJECTLARVA, selected))\n continue\n elif queen.energy >= 25:\n await self.ai.place_tumor(queen)\n\n for hatch in self.hatchery.ready.noqueue:\n if not self.queens.closer_than(4, hatch):\n for queen in self.queens.idle:\n if not self.ai.townhalls.closer_than(4, queen):\n self.ai.add_action(queen.move(hatch.position))\n break\n\n return True", "def workers_ready(self, qty=None):\n agents = self.agents_status()\n if any([a['state'] != 'RUNNING' for a in agents]):\n return False\n if qty and len(agents) != qty:\n return False\n return True", "def is_available(self, cmd):\n num_qubits = 0\n for qureg in cmd.all_qubits:\n num_qubits += len(qureg)\n return num_qubits <= 2", "def check_queue(st):\n\n logging.info(\"Checking queue...\")\n check_time = time.time()\n n_waiting_jobs = BatchPlugin.poll_queue()\n\n if n_waiting_jobs is not None:\n\n # Correction factor\n corr = st['vms_allegedly_running'] * cf['elastiq']['n_jobs_per_vm']\n logging.info(\"Jobs: waiting=%d | allegedly running=%d | considering=%d\" % \\\n (n_waiting_jobs, corr, n_waiting_jobs-corr))\n n_waiting_jobs -= corr\n\n if n_waiting_jobs > cf['elastiq']['waiting_jobs_threshold']:\n if st['first_seen_above_threshold'] != -1:\n if (check_time-st['first_seen_above_threshold']) > cf['elastiq']['waiting_jobs_time_s']:\n # Above threshold time-wise and jobs-wise: do something\n logging.info(\"Waiting jobs: %d (above threshold of %d for more than %ds)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold'], cf['elastiq']['waiting_jobs_time_s']))\n list_ok = scale_up( math.ceil(n_waiting_jobs / float(cf['elastiq']['n_jobs_per_vm'])), valid_hostnames=st['workers_status'].keys(), vms_allegedly_running=st['vms_allegedly_running'] )\n for inst in list_ok:\n change_vms_allegedly_running(st, 1, inst)\n st['event_queue'].append({\n 'action': 'check_owned_instance',\n 'when': time.time() + cf['elastiq']['estimated_vm_deploy_time_s'],\n 'params': [ inst ]\n })\n st['first_seen_above_threshold'] = -1\n else:\n # Above threshold but not for enough time\n logging.info(\"Waiting jobs: %d (still above threshold of %d for less than %ds)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold'], cf['elastiq']['waiting_jobs_time_s']))\n else:\n # First time seen above threshold\n logging.info(\"Waiting jobs: %d (first time above threshold of %d)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold']))\n st['first_seen_above_threshold'] = check_time\n else:\n # Not above threshold: reset\n logging.info(\"Waiting jobs: %d (below 
threshold of %d)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold']))\n st['first_seen_above_threshold'] = -1\n else:\n logging.error(\"Cannot get the number of waiting jobs this time, sorry\")\n\n return {\n 'action': 'check_queue',\n 'when': time.time() + cf['elastiq']['check_queue_every_s']\n }", "async def train_queen(self):\n if (\n self.structures(UnitTypeId.SPAWNINGPOOL).ready\n and len(self.units(UnitTypeId.QUEEN)) < len(self.townhalls)\n and self.already_pending(UnitTypeId.QUEEN) < len(self.townhalls.ready)\n ):\n self.train(UnitTypeId.QUEEN)", "def should_start_analysis(self):\n return len(self.task_queue) >= self.bulk_size", "def validate_ready_to_run(self):\n super(FlexibleMaster, self).validate_ready_to_run()\n if len(self._job_name_lst) < len(self._step_function_lst) + 1:\n raise ValueError(\"Not enough job names set.\")\n elif len(self._job_name_lst) > len(self._step_function_lst) + 1:\n raise ValueError(\"Not enough step functions set.\")", "def testSchdulerCheckIngestReady(self):\n\t\tpipelines = {\n\t\t\t\"continuum\": {\n\t\t\t\t\"demand\": 5\n\t\t\t}\n\t\t}\n\t\tobservation = Observation(\n\t\t\t'planner_observation',\n\t\t\tOBS_START_TME,\n\t\t\tOBS_DURATION,\n\t\t\tOBS_DEMAND,\n\t\t\tOBS_WORKFLOW,\n\t\t\ttype=\"continuum\",\n\t\t\tdata_rate=2\n\t\t)\n\t\t# There should be capacity\n\t\tself.assertEqual(0.0, self.env.now)\n\t\tret = self.scheduler.check_ingest_capacity(observation, pipelines)\n\t\tself.assertTrue(ret)\n\n\t\t# Let's remove capacity to check it returns false\n\t\ttmp = self.cluster.available_resources\n\t\tself.cluster.available_resources = self.cluster.available_resources[:3]\n\t\tret = self.scheduler.check_ingest_capacity(observation, pipelines)\n\t\tself.assertFalse(ret)\n\t\tself.cluster.available_resources = tmp\n\t\tself.assertEqual(10, len(self.cluster.available_resources))", "async def should_handle(self):\n local_controller = self.controller\n workers_total = len(local_controller.workers)\n geysers = local_controller.extractors\n drones_in_queue = local_controller.already_pending(DRONE)\n if (\n not local_controller.close_enemies_to_base\n and local_controller.can_train(DRONE)\n and not local_controller.counter_attack_vs_flying\n ):\n if workers_total == 12 and not drones_in_queue:\n return True\n if (\n workers_total in (13, 14, 15)\n and len(local_controller.overlords) + local_controller.already_pending(OVERLORD) > 1\n ):\n return True\n optimal_workers = min(\n sum(x.ideal_harvesters for x in local_controller.townhalls | geysers), 90 - len(geysers)\n )\n return (\n workers_total + drones_in_queue < optimal_workers\n and np.sum(\n np.array(\n [\n len(local_controller.zerglings),\n len(local_controller.hydras),\n len(local_controller.ultralisks),\n ]\n )\n * np.array([1, 2, 3])\n )\n > 15\n )\n return False", "def _check_overprocessed_subcontract_qty(self):\n overprocessed_moves = self.env['stock.move']\n for move in self:\n if not move.is_subcontract:\n continue\n # Extra quantity is allowed when components do not need to be register\n if not move._has_tracked_subcontract_components():\n continue\n rounding = move.product_uom.rounding\n if float_compare(move.quantity_done, move.move_orig_ids.production_id.qty_produced, precision_rounding=rounding) > 0:\n overprocessed_moves |= move\n if overprocessed_moves:\n raise UserError(_(\"\"\"\nYou have to use 'Records Components' button in order to register quantity for a\nsubcontracted product(s) with tracked component(s):\n %s.\nIf you want to process more than initially planned, 
you\ncan use the edit + unlock buttons in order to adapt the initial demand on the\noperations.\"\"\") % ('\\n'.join(overprocessed_moves.mapped('product_id.display_name'))))", "def testQueueisEmpty(self):\n self.mgr.isGoproBusy = True\n self.mgr.processMsgQueue()\n self.assertFalse( self.mgr.isGoproBusy )", "def _check_items_limit(self):\n if self.items_limit and self.items_limit == self.get_metadata('items_count'):\n raise ItemsLimitReached('Finishing job after items_limit reached:'\n ' {} items written.'.format(self.get_metadata('items_count')))", "def call_q(self, _):\n return False", "def call_q(self, _):\n return False", "def call_q(self, _):\n return False", "def in_queue(self):\n if self.get_db('jobid') is None:\n log.debug('jobid not found for calculation.')\n return False\n else:\n # get the jobid\n jobid = self.get_db('jobid')\n # see if jobid is in queue\n _, jobids_in_queue, _ = getstatusoutput('qselect',\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n if str(jobid) in jobids_in_queue.split('\\n'):\n # get details on specific jobid in case it is complete\n status, output, err = getstatusoutput(['qstat', jobid],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n if status == 0:\n lines = output.split('\\n')\n fields = lines[2].split()\n job_status = fields[4]\n if job_status == 'C':\n return False\n else:\n return True\n else:\n return False", "def is_busy(self):\n threads = len(self.executor._threads)\n if threads == 0:\n return False\n\n capacity = self.executor._work_queue.qsize() / float(threads)\n if capacity > 2:\n return True\n elif capacity < 1:\n return False\n else:\n return capacity > (random.random() + 1)", "def chisq(self, params=None):\n\n\t\tself._submit_to_queue( params )\n\t\tqueue_contents = self._retrieve_from_queue()\n\n\t\tret = 0\n\t\t# match experiments and calculate discrepancy\n\t\tfor (title,dQ) in queue_contents:\n\t\t\tE = self.get_experiment_by_title(title)\n\t\t\tret += E.chisq(dQ,writeback=False)\n\n\t\treturn ret", "def _check_n_jobs(n_jobs):\n _check_type(n_jobs, (\"int\",), \"n_jobs\")\n if n_jobs <= 0:\n n_cores = mp.cpu_count()\n n_jobs_orig = n_jobs\n n_jobs = min(n_cores + n_jobs + 1, n_cores)\n if n_jobs <= 0:\n raise ValueError(\n f\"If n_jobs has a non-positive value ({n_jobs_orig}), it must \"\n f\"not be less than the number of CPUs present ({n_cores}).\"\n )\n return n_jobs", "def configured(self):\n return super().configured and self.max_harvest is not None", "def qc_qubit(args):\n clarity_epp.qc.qubit.set_qc_flag(lims, args.process_id)", "def run_queued_until_change(self, classification):\n initial = self.shrink_target\n while self.has_queued_passes(classification) and self.shrink_target is initial:\n self.pop_queued_pass(classification)\n return self.shrink_target is not initial", "def schedule(self, job: Job) -> bool:\n if self.num_avail_cores < job.num_cores:\n return False\n\n # Find the available cores\n num_cores_found = 0\n\n for i in range(self.num_cores):\n if self.core_status[i] == 0:\n # available\n\n self.core_status[i] = job.num_timesteps\n self.core_job_id[i] = job.id\n \n self.num_avail_cores -= 1\n num_cores_found += 1\n if num_cores_found >= job.num_cores:\n # found all the cores needed, we're done\n break\n \n return True", "def _is_grade_request(self):\r\n return 'xqueue/submit' in self.path", "def test_notBeforeWhenEnqueueing(self):\n\n dbpool, qpool, clock, performerChosen = self._setupPools()\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return qpool.enqueueWork(\n txn, 
DummyWorkItem, a=3, b=9,\n notBefore=datetime.datetime(2012, 12, 12, 12, 12, 20)\n )\n\n yield check\n\n # This is going to schedule the work to happen with some asynchronous\n # I/O in the middle; this is a problem because how do we know when it's\n # time to check to see if the work has started? We need to intercept\n # the thing that kicks off the work; we can then wait for the work\n # itself.\n\n self.assertEquals(performerChosen, [])\n\n # Advance to exactly the appointed second.\n clock.advance(20 - 12)\n self.assertEquals(performerChosen, [True])\n\n # Wait for job\n while (yield inTransaction(dbpool.pool.connection, lambda txn: JobItem.all(txn))):\n clock.advance(1)\n\n # Work item complete\n self.assertTrue(DummyWorkItem.results == {1: 12})", "def simple_jobs(driver):\n free = driver.execute_script('return Math.ceil(game.resources.trimps.realMax()/2) - game.resources.trimps.employed')\n if free == 0:\n return\n\n total = free\n for job in JOBS:\n current_job_number = driver.execute_script('return game.jobs[\"%s\"].owned' % job)\n total += current_job_number\n\n for job in JOBS:\n diff = int(total * JOBS_RATIOS[job]) - driver.execute_script('return game.jobs[\"%s\"].owned' % job)\n if diff <= 0:\n # nothing to see here\n continue\n\n buy = min(diff, free)\n buy_jobs(driver, job, buy)", "def verify_queue_empty(self):\n self.assert_sample_queue_size(DataParticleType.VELOCITY_PARTICLE, 0)\n self.assert_sample_queue_size(DataParticleType.TIME_PARTICLE, 0)" ]
[ "0.61850584", "0.5900076", "0.5773874", "0.55981344", "0.5565287", "0.55206895", "0.5452229", "0.5430043", "0.5409027", "0.5397476", "0.53824", "0.5199531", "0.518388", "0.51629543", "0.5160733", "0.5147695", "0.5147695", "0.5147695", "0.51367724", "0.51311034", "0.51042396", "0.50276524", "0.50170594", "0.5007874", "0.5002147", "0.5000363", "0.49994883", "0.4991064", "0.49909404", "0.49877414" ]
0.76257104
0
Dispatch QN_REFERENCE creation jobs for all Organisms with a platform with enough processed samples.
def handle(self, *args, **options): if options["organisms"]: organism_names = options["organisms"].split(",") organisms = Organism.objects.filter(name__in=organism_names) else: organisms = Organism.objects.all() for organism in organisms: dispatch_qn_job_if_eligible(organism)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dispatch_qn_job_if_eligible(organism: Organism) -> None:\n samples = Sample.processed_objects.filter(\n organism=organism,\n has_raw=True,\n technology=\"MICROARRAY\",\n is_processed=True,\n platform_name__contains=\"Affymetrix\",\n )\n\n if samples.count() < MIN:\n logger.info(\n \"Total proccessed samples don't meet minimum threshhold\",\n organism=organism,\n count=samples.count(),\n min=MIN,\n )\n return\n\n platform_counts = (\n samples.values(\"platform_accession_code\")\n .annotate(dcount=Count(\"platform_accession_code\"))\n .order_by(\"-dcount\")\n )\n biggest_platform = platform_counts[0][\"platform_accession_code\"]\n\n sample_codes_results = Sample.processed_objects.filter(\n platform_accession_code=biggest_platform,\n has_raw=True,\n technology=\"MICROARRAY\",\n organism=organism,\n is_processed=True,\n ).values(\"accession_code\")\n\n if sample_codes_results.count() < MIN:\n logger.info(\n \"Number of processed samples for largest platform didn't mean threshold.\",\n organism=organism,\n platform_accession_code=biggest_platform,\n count=sample_codes_results.count(),\n min=MIN,\n )\n return\n\n sample_codes = [res[\"accession_code\"] for res in sample_codes_results]\n\n dataset = Dataset()\n dataset.data = {organism.name + \"_(\" + biggest_platform + \")\": sample_codes}\n dataset.aggregate_by = \"ALL\"\n dataset.scale_by = \"NONE\"\n dataset.quantile_normalize = False\n dataset.save()\n\n job = ProcessorJob()\n job.pipeline_applied = \"QN_REFERENCE\"\n job.save()\n\n pjda = ProcessorJobDatasetAssociation()\n pjda.processor_job = job\n pjda.dataset = dataset\n pjda.save()\n\n logger.info(\"Sending QN_REFERENCE for Organism\", job_id=str(job.pk), organism=str(organism))\n send_job(ProcessorPipeline.QN_REFERENCE, job)\n\n return job", "def manageBuscoQuery(output_dir, individual_or_summary, samples,\n mets_or_mags, pep_ext, nt_ext,\n sample_dir, organisms, organisms_taxonomy,\n tax_tab, busco_threshold, perc_mem):\n max_jobs = calc_max_jobs(len(samples), perc_mem = perc_mem)\n samples_complete = []\n if individual_or_summary == \"individual\":\n if len(organisms) != len(organisms_taxonomy):\n print(\"A different number of organisms was specified than \"+\\\n \"the taxonomic levels given in \" +\\\n \"individual mode. Please check inputs.\")\n sys.exit(1)\n if (len(organisms) == 0) | (len(organisms_taxonomy) == 0):\n print(\"The number of organisms specified was \" + str(len(organisms)) +\\\n \" and the number of taxonomic levels specified was \" +\\\n str(len(organisms_taxonomy)) +\n \" in individual mode. Neither can be zero. \"+\\\n \"Please check inputs.\")\n sys.exit(1)\n\n for sample_name in samples:\n # the BUSCO table that we're interested in using that contains the\n # BUSCO matches and their level of completeness\n if not os.path.isfile(os.path.join(output_dir, \"busco\",\n sample_name, \"run_eukaryota_odb10\",\n \"full_table.tsv\")):\n print(\"BUSCO run either did not complete successfully, \"+\\\n \"or returned no matches for sample\",\n sample_name,\". 
Check busco_run log for details.\")\n continue\n samples_complete.append(sample_name)\n \n busco_table = os.path.join(output_dir, \"busco\", sample_name,\n \"run_eukaryota_odb10\", \"full_table.tsv\")\n missing_buscos = pd.read_csv(os.path.join(output_dir, \"busco\",\n sample_name,\n \"run_eukaryota_odb10\",\n \"missing_busco_list.tsv\"),\n sep = \"\\t\", comment = \"#\", header = None)\n if len(missing_buscos.index) < 255:\n print(\"At least one BUSCO present in sample\",sample_name,\"but\",\n len(missing_buscos.index),\n \"missing.\",flush=True)\n samples_complete.append(sample_name)\n else:\n print(\"No matches returned for sample\",sample_name,\n \". Assessment files will be empty.\",flush=True)\n # the prefix to specify where the taxonomy estimation\n # output files are located\n taxfile_stub = os.path.join(output_dir, \"taxonomy_counts\",\n output_dir.split(\"/\")[-1])\n\n if mets_or_mags == \"mets\":\n if os.path.isfile(os.path.join(output_dir, mets_or_mags,\n sample_name + \".\" + pep_ext)):\n fasta = os.path.join(output_dir, mets_or_mags,\n sample_name + \".\" + pep_ext)\n else:\n fasta = os.path.join(sample_dir, sample_name + \".\" + nt_ext)\n else:\n fasta = os.path.join(sample_dir, sample_name + \".\" + pep_ext)\n\n query_busco_log = open(os.path.join(output_dir,\"log\",\"busco_query_\" +\\\n sample_name + \".log\"), \"w+\")\n query_busco_err = open(os.path.join(output_dir,\"log\",\"busco_query_\" +\\\n sample_name + \".err\"), \"w+\")\n sys.stdout = query_busco_log\n sys.stderr = query_busco_err\n query_args = [\"--organism_group\",str(\" \".join(organisms)),\"--taxonomic_level\",\n str(\" \".join(organisms_taxonomy)),\"--output_dir\",output_dir,\n \"--fasta_file\",fasta,\"--sample_name\",sample_name,\n \"--taxonomy_file_prefix\",taxfile_stub,\n \"--tax_table\",tax_tab,\"--busco_out\",busco_table,\n \"-i\",\"individual\",\n \"--busco_threshold\",str(busco_threshold)]\n try:\n rc = queryBusco(query_args)\n except:\n print(\"BUSCO query did not run successfully for sample \" +\\\n sample_name + \"; check log file for details.\")\n sys.exit(1)\n\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n #if (rc != 0) | (os.stat(os.path.join(\"log\",\"busco_query_\" + sample_name +\n # \".err\")).st_size != 0):\n # print(\"BUSCO query did not run successfully for sample\n # \" + sample_name + \"; check log file for details.\")\n # sys.exit(1)\n else:\n for sample_name in samples:\n # the BUSCO table that we're interested in using that contains the\n # BUSCO matches and their level of completeness\n if not os.path.isfile(os.path.join(output_dir, \"busco\", sample_name,\n \"run_eukaryota_odb10\", \"full_table.tsv\")):\n print(\"BUSCO run either did not complete successfully, \",\n \"or returned no matches for sample\",\n sample_name,\". Check busco_run log for details.\",\n flush=True)\n continue\n busco_table = os.path.join(output_dir, \"busco\", sample_name,\n \"run_eukaryota_odb10\", \"full_table.tsv\")\n missing_buscos = pd.read_csv(os.path.join(output_dir, \"busco\", sample_name,\n \"run_eukaryota_odb10\",\n \"missing_busco_list.tsv\"),\n sep = \"\\t\", comment = \"#\", header = None)\n if len(missing_buscos.index) < 255:\n print(\"At least one BUSCO present in sample\",sample_name,\n \"but\",len(missing_buscos.index),\n \"missing.\",flush=True)\n samples_complete.append(sample_name)\n else:\n print(\"No matches returned for sample\",sample_name,\n \". 
Assessment files will be empty.\",flush=True)\n # the prefix to specify where the taxonomy estimation output files are located\n taxfile_stub = os.path.join(output_dir, \"taxonomy_counts\",\n output_dir.split(\"/\")[-1])\n\n if mets_or_mags == \"mets\":\n fasta = os.path.join(output_dir, mets_or_mags,\n sample_name + \".\" + pep_ext)\n else:\n fasta = os.path.join(sample_dir, sample_name + \".\" + nt_ext)\n\n query_busco_log = open(os.path.join(output_dir,\"log\",\n \"busco_query_\" + sample_name + \".log\"),\n \"w+\")\n query_busco_err = open(os.path.join(output_dir,\"log\",\n \"busco_query_\" + sample_name + \".err\"),\n \"w+\")\n sys.stdout = query_busco_log\n sys.stderr = query_busco_err\n query_args = [\"--output_dir\",output_dir,\"--fasta_file\",fasta,\"--sample_name\",\n sample_name,\"--taxonomy_file_prefix\",taxfile_stub,\"--tax_table\",\n tax_tab,\"--busco_out\",busco_table,\"-i\",\"summary\"]\n\n try:\n rc = queryBusco(query_args)\n except OSError as e:\n print(\"Not all files needed to run BUSCO query \",\n \"(output of BUSCO run) found;\",\\\n \"check log file for details. Here is the error:\",e)\n rc = 1\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n rc = 1\n\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n\n if (rc != 0) | (os.stat(os.path.join(output_dir,\"log\",\n \"busco_query_\" +\\\n sample_name + \".err\")).st_size != 0):\n print(\"BUSCO query did not run successfully for sample \" +\\\n sample_name + \"; check log file for details.\")\n sys.exit(1)\n else:\n print(\"BUSCO query complete.\")\n \n if len(samples_complete) == 0:\n print(\"No BUSCO matches found for any sample. \",\n \"Check BUSCO run log for details. Exiting...\")\n return False\n return True", "def do_build(self):\n self.build_queue = sorted(self.build_queue, key=lambda q: q['priority'])\n for item in self.build_queue:\n for i in range(0, item['count']['raw']):\n item['town'].owned.append(item['unit'])", "def setup_jobs(self):\n transfer_args = [\"analysis_type\", \"perturbation\", \"num_permutations\", \"permutation_test_statistic\", \"loss_function\",\n \"importance_significance_level\", \"window_search_algorithm\", \"window_effect_size_threshold\"]\n jobs = [None] * self.num_jobs\n for idx in range(self.num_jobs):\n # Create and launch condor job\n features_filename = constants.INPUT_FEATURES_FILENAME.format(self.args.output_dir, idx)\n input_files = [features_filename, self.args.model_filename, self.args.model_loader_filename, self.args.data_filename]\n job_dir = f\"{self.args.output_dir}/outputs_{idx}\"\n cmd = f\"python3 -m anamod.core.worker -worker_idx {idx}\"\n for arg in transfer_args:\n if hasattr(self.args, arg):\n cmd += f\" -{arg} {getattr(self.args, arg)}\"\n # Relative file paths for non-shared FS, absolute for shared FS\n for name, path in dict(output_dir=job_dir, features_filename=features_filename, model_filename=self.args.model_filename,\n model_loader_filename=self.args.model_loader_filename, data_filename=self.args.data_filename).items():\n cmd += f\" -{name} {os.path.abspath(path)}\" if self.args.shared_filesystem else f\" -{name} {os.path.basename(path)}\"\n job = CondorJobWrapper(cmd, input_files, job_dir, shared_filesystem=self.args.shared_filesystem,\n memory=f\"{self.args.memory_requirement}GB\", disk=f\"{self.args.disk_requirement}GB\",\n avoid_bad_hosts=self.args.avoid_bad_hosts, retry_arbitrary_failures=self.args.retry_arbitrary_failures,\n cleanup=self.args.cleanup)\n jobs[idx] = job\n return jobs", "def step_impl(context, objects_type):\n\n 
log.info(\"=====> From the STB verify that the HTTP Cache is built and the objects from the nsa file are available to query\")\n\n if objects_type == \"ObjectsTest1\":\n object_list = resourceset_parameters.ObjectsTest1\n elif objects_type == \"ObjectsTest2\":\n object_list = resourceset_parameters.ObjectsTest2\n elif objects_type == \"ObjectsTest4\":\n object_list = resourceset_parameters.ObjectsTest4\n else:\n assert False, \" ****> Failed: No objects_type parameter while posting. Got: {objects_type}\".format(objects_type=objects_type)\n\n for i in object_list:\n verify_object_available_in_stb(context, resourceset_parameters.Object_names[i])", "def createUsedBy( self ):\n for aChunk in self.chunkSeq:\n #usage = (self.fullNameFor(aChunk.name), aChunk.seq)\n for aRefName in aChunk.genReferences( self ):\n for c in self.getchunk( aRefName ):\n c.referencedBy.append( aChunk )\n c.refCount += 1\n \n for nm in self.no_reference():\n self.logger.warn( \"No reference to {!r}\".format(nm) )\n for nm in self.multi_reference():\n self.logger.warn( \"Multiple references to {!r}\".format(nm) )\n for nm in self.no_definition():\n self.logger.error( \"No definition for {!r}\".format(nm) )\n self.errors += 1", "def _link_jobs(self):\n for i, j in enumerate(self.jobs):\n j.link(self, i)\n j.claim_artifacts()", "def _instantiate__deferred_inits(self, context=None):\n\n # For each mechanism in the Process, in backwards order through its _mech_tuples\n for item in reversed(self._mech_tuples):\n mech = item[OBJECT_ITEM]\n mech._deferred_init()\n\n # For each inputState of the mechanism\n for input_state in mech.inputStates.values():\n input_state._deferred_init()\n # Restrict projections to those from mechanisms in the current process\n projections = []\n for projection in input_state.receivesFromProjections:\n try:\n if self in projection.sender.owner.processes:\n projections.append(projection)\n except AttributeError:\n pass\n self._instantiate__deferred_init_projections(projections, context=context)\n\n # For each parameterState of the mechanism\n for parameter_state in mech.parameterStates.values():\n parameter_state._deferred_init()\n self._instantiate__deferred_init_projections(parameter_state.receivesFromProjections)\n\n # Label monitoring mechanisms and add _monitoring_mech_tuples to _mech_tuples for execution\n if self._monitoring_mech_tuples:\n\n # Add designations to newly created MonitoringMechanisms:\n for mech_tuple in self._monitoring_mech_tuples:\n mech = mech_tuple[OBJECT_ITEM]\n # If\n # - mech is a TARGET ObjectiveMechanism, and\n # - the mech that projects to mech is a TERMINAL for the current process, and\n # - current process has learning specified\n # then designate mech as a TARGET\n if (isinstance(mech, ObjectiveMechanism) and\n # any(projection.sender.owner.processes[self] == TERMINAL\n # for projection in mech.inputStates[SAMPLE].receivesFromProjections) and\n mech.learning_role is TARGET and\n self.learning\n ):\n mech_tuple[0].processes[self] = TARGET\n else:\n # mech must be a LearningMechanism;\n # If a learning_rate has been specified for the process, assign that to all LearningMechanisms\n # for which a mechanism-specific learning_rate has NOT been assigned\n if (self.learning_rate is not None and\n mech.function_object.learning_rate is None):\n mech.function_object.learning_rate = self.learning_rate\n\n # Assign its label\n mech_tuple[0].processes[self] = MONITORING\n\n # Add _monitoring_mech_tuples to _mech_tuples\n self._mech_tuples.extend(self._monitoring_mech_tuples)\n\n 
# IMPLEMENTATION NOTE:\n # MonitoringMechanisms for learning are assigned _phaseSpecMax;\n # this is so that they will run after the last ProcessingMechansisms have run", "def process_quasar(folder, set_type, doc_size):\n print(\"def process_quasar(folder, set_type, doc_size) ...\")\n\n # create counter for enumeration of batch-files\n counter = 0\n\n # Question File and Path\n question_file = set_type + \"_questions.json\"\n question_file_path = Path(\"/\".join([folder, \"questions\", question_file]))\n\n # Contexts File and Path\n context_file = set_type + \"_contexts.json\"\n context_file_path = Path(\"/\".join([folder, \"contexts\", doc_size, context_file]))\n\n with open(question_file_path, \"r\") as qf, open(context_file_path, \"r\") as cf:\n question_id_list = list()\n data_dict = dict()\n batches_data = list()\n\n # Parse each line separate to avoid memory issues\n for line in qf:\n parsed_question = json.loads(line)\n question_id = parsed_question[\"uid\"]\n question_id_list.append(question_id)\n data_dict[question_id] = {\"answer\": parsed_question[\"answer\"]}\n data_dict[question_id].update({\"question\": parsed_question[\"question\"]})\n\n # in order to create batches with the size of 30 and to avoid Memory Errors\n if len(data_dict) == 30:\n contexts_counter = 0\n for line2 in cf:\n parsed_answer = json.loads(line2)\n # Answer ID should have a corresponding question ID\n answer_id = parsed_answer[\"uid\"]\n if answer_id in question_id_list:\n contexts_counter += 1\n # List of contexts with retrieval scores, contexts are sorted from highest to lowest score\n answer_contexts = parsed_answer[\"contexts\"]\n # remove scores of contexts\n cleaned_answer_contexts = [ls_elem[1] for ls_elem in answer_contexts]\n data_dict[answer_id].update({\"contexts\": cleaned_answer_contexts})\n if contexts_counter == 30:\n contexts_counter = 0\n break\n\n # add information where answer in context is\n answers_list, questions_list, contexts_list = add_end_idx(data_dict)\n\n # create the batch-encodings\n batches_data.append(create_encodings(answers_list, questions_list, contexts_list))\n data_dict.clear()\n question_id_list.clear()\n # if len(batches_data) % 1000 == 0:\n\n print(\"\\n length batches_data \" + str(len(batches_data)) + \" \" + str(counter))\n\n if len(batches_data) == 2000:\n counter += 1\n save_batch_files(\"/local/anasbori/bert_odqa/ODQA_Bert_Project/batch_output\", batches_data,\n counter)\n\n batches_data.clear()\n\n counter += 1\n save_batch_files(Path(\"/local/anasbori/bert_odqa/ODQA_Bert_Project/batch_output\"), batches_data, counter)", "def build_reference(sets_to_reference):\n\n number_to_uuid = {}\n card_reference = {}\n name_to_uuid = {}\n uuid_to_number = {}\n\n print(\"- Building internal Card Reference -\")\n for setName in tqdm(sets_to_reference) :\n # Fix 1 on WIN systems since CON.json is reserved :\n if setName == 'CON':\n setName = 'CON_'\n # End Fix 1\n with open(ROOT_DIR + 'data/sets/' + setName + '.json') as f:\n # Fix 2 on WIN systems since CON.json is reserved :\n if setName == 'CON_':\n setName = 'CON'\n # End Fix 2\n data = json.load(f)\n name_to_uuid[setName]= {} \n number_to_uuid[setName]= {}\n card_reference[setName]= {}\n uuid_to_number[setName]= {}\n for item in data['data']['cards']:\n #print(item)\n number_to_uuid[setName][item['number']] = item['uuid']\n name_to_uuid[setName][item['name']] = item['uuid']\n uuid_to_number[setName][item['uuid']] = item['number']\n foreignName = {}\n for languageData in item['foreignData']:\n if 
languageData['language'] == 'Spanish' and 'ES' in languages_to_reference:\n name_to_uuid[setName][languageData['name']] = item['uuid']\n language = 'ES'\n foreignName[language] = languageData['name']\n elif languageData['language'] == 'French' and 'FR' in languages_to_reference:\n name_to_uuid[setName][languageData['name']] = item['uuid']\n language = 'FR'\n foreignName[language] = languageData['name']\n elif languageData['language'] == 'German' and 'DE' in languages_to_reference:\n name_to_uuid[setName][languageData['name']] = item['uuid']\n language = 'DE'\n foreignName[language] = languageData['name']\n elif languageData['language'] == 'Italian' and 'IT' in languages_to_reference:\n name_to_uuid[setName][languageData['name']] = item['uuid']\n language = 'IT'\n foreignName[language] = languageData['name']\n elif languageData['language'] == 'Portuguese' and 'PT' in languages_to_reference:\n name_to_uuid[setName][languageData['name']] = item['uuid']\n language = 'PT'\n foreignName[language] = languageData['name']\n elif languageData['language'] == 'Japanese' and 'JP' in languages_to_reference:\n name_to_uuid[setName][languageData['name']] = item['uuid']\n language = 'JP'\n foreignName[language] = languageData['name']\n elif languageData['language'] == 'Korean' and 'KO' in languages_to_reference:\n name_to_uuid[setName][languageData['name']] = item['uuid']\n language = 'KO'\n foreignName[language] = languageData['name']\n elif languageData['language'] == 'Russian' and 'RU' in languages_to_reference:\n name_to_uuid[setName][languageData['name']] = item['uuid']\n language = 'RU'\n foreignName[language] = languageData['name']\n elif languageData['language'] == 'Chinese' and 'ZH' in languages_to_reference:\n name_to_uuid[setName][languageData['name']] = item['uuid']\n language = 'ZH'\n foreignName[language] = languageData['name']\n card_reference[setName][item['uuid']] = {'name' : item['name'],\n 'colorIdentity' : item['colorIdentity'],\n 'convertedManaCost' : item['convertedManaCost'],\n 'legalities' : item['legalities'],\n 'foreignName' : foreignName,\n 'number' : item['number'],\n 'rarity' : item['rarity'],\n 'setCode' : item['setCode'],\n 'subtypes' : item['subtypes'],\n 'supertypes' : item['supertypes'],\n 'types' : item['types'],\n 'uuid' : item['uuid'] }\n try :\n card_reference[setName][item['uuid']]['keywords'] = item['keywords']\n except :\n pass\n try :\n card_reference[setName][item['uuid']]['power'] = item['power']\n except :\n pass\n try :\n card_reference[setName][item['uuid']]['toughness'] = item['toughness']\n except :\n pass \n try :\n card_reference[setName][item['uuid']]['manaCost'] = item['manaCost']\n except :\n pass\n # Token version of the set : setname is preceded by 'T' \n name_to_uuid['T'+setName]= {} \n number_to_uuid['T'+setName]= {}\n card_reference['T'+setName]= {}\n uuid_to_number['T'+setName]= {} \n for item in data['data']['tokens']:\n number_to_uuid['T'+setName][item['number']] = item['uuid']\n name_to_uuid['T'+setName][item['name']] = item['uuid']\n uuid_to_number['T'+setName][item['uuid']] = item['number']\n card_reference['T'+setName][item['uuid']] = {'name' : item['name'],\n 'colorIdentity' : item['colorIdentity'],\n 'convertedManaCost' : 0,\n 'number' : item['number'],\n 'setCode' : item['setCode'],\n 'subtypes' : item['subtypes'],\n 'supertypes' : item['supertypes'],\n 'types' : item['types'],\n 'uuid' : item['uuid'] }\n return (card_reference, name_to_uuid, number_to_uuid, uuid_to_number)", "def makeLargeTracts(input_queue, output_queue, config, 
db_config):\n\n \n # capture the process name\n my_name = mp.current_process().name\n my_ip_address = socket.gethostbyname(socket.gethostname())\n\n while True:\n try:\n # get the next element out of the queue\n inputs = input_queue.get()\n try:\n if inputs[0] is None: break\n\n # extract the terms from the queue list\n numprov_path = inputs[0] \n blockm_df = inputs[1] \n out_tract_path = inputs[2] \n out_county_path = inputs[3] \n out_tract_df = inputs[4]\n out_county_df = inputs[5] \n start_time = inputs[6] \n worker_speed = inputs[7]\n config = inputs[8]\n geom = 'geoid%s' % config['census_vintage'][2:]\n\n continue_run, block_numprov = openNumprovFile(numprov_path, geom, \n my_name, my_ip_address, worker_speed, \n start_time, output_queue)\n\n if continue_run:\n continue_run, block_numprov = mergeWithDataFrame(\n block_numprov, blockm_df, geom, my_name, \n my_ip_address, worker_speed, start_time, \n output_queue) \n\n if continue_run:\n for geo in ['tract', 'county']:\n continue_run, out_df = findPerCapitaProviders(my_name, \n my_ip_address, geo, block_numprov, \n output_queue, start_time, config, \n worker_speed, eval('out_%s_df' % geo))\n \n if continue_run:\n continue_run = outputGeoData(out_df, \n eval('out_%s_path' % geo), my_name, \n my_ip_address, geo, worker_speed, \n start_time, output_queue)\n\n except:\n pass\n\n except:\n # nothing in the queue, wait and check again\n time.sleep(1)\n\n return True", "def conformational_sampling(settings, systems, n_conf, steps_integrator, interface=None):\n for system in systems:\n # Create QM Engines\n if system.interface is None:\n system.interface = ParaMolInterface()\n\n system.create_qm_engines(settings.qm_engine[\"qm_engine\"], settings.qm_engine[settings.qm_engine[\"qm_engine\"].lower()])\n\n system.ref_coordinates = []\n system.ref_energies = []\n system.ref_forces = []\n system.n_structures = 0\n\n logging.info(\"Performing conformational sampling of {} conformations of system {}.\".format(n_conf, system.name))\n\n for i in range(n_conf):\n system.engine.context.setVelocitiesToTemperature(system.engine.integrator.getTemperature())\n\n # Perform classical MD\n system.engine.integrator.step(steps_integrator)\n\n # Get positions and compute QM energy and forces\n coord = system.engine.context.getState(getPositions=True).getPositions()\n energy, forces = system.qm_engine.qm_engine.run_calculation(coords=coord.in_units_of(unit.angstrom)._value, label=0)\n\n # Append energies, forces and conformations\n system.ref_energies.append(energy)\n system.ref_forces.append(np.asarray(forces))\n system.ref_coordinates.append(np.asarray(coord._value))\n system.n_structures += 1\n\n system.ref_forces = np.asarray(system.ref_forces)\n system.ref_energies = np.asarray(system.ref_energies)\n system.ref_coordinates = np.asarray(system.ref_coordinates)\n\n logging.info(\"Conformational sampling of all systems performed successfully.\")\n\n return systems", "def _setup():\n for item in dominos:\n queue.append([item])", "def generate_sample_sheet(self):\n pool = self.pool\n bcl2fastq_sample_ids = []\n i7_names = []\n i7_sequences = []\n i5_names = []\n i5_sequences = []\n wells = []\n plate = pool.container.external_id\n sample_ids = []\n sequencer_type = self.sequencer.equipment_type\n\n for component in pool.components:\n lp_composition = component['composition']\n # Get the well information\n wells.append(lp_composition.container.well_id)\n # Get the i7 index information\n i7_comp = lp_composition.i7_composition.primer_set_composition\n 
i7_names.append(i7_comp.external_id)\n i7_sequences.append(i7_comp.barcode)\n # Get the i5 index information\n i5_comp = lp_composition.i5_composition.primer_set_composition\n i5_names.append(i5_comp.external_id)\n i5_sequences.append(i5_comp.barcode)\n # Get the sample id\n sample_id = lp_composition.normalized_gdna_composition.\\\n gdna_composition.sample_composition.content\n sample_ids.append(sample_id)\n\n # Transform te sample ids to be bcl2fastq-compatible\n bcl2fastq_sample_ids = [\n SequencingProcess._bcl_scrub_name(sid) for sid in sample_ids]\n # Reverse the i5 sequences if needed based on the sequencer\n i5_sequences = SequencingProcess._sequencer_i5_index(\n sequencer_type, i5_sequences)\n\n data = SequencingProcess._format_sample_sheet_data(\n bcl2fastq_sample_ids, i7_names, i7_sequences, i5_names,\n i5_sequences, wells=wells, sample_plate=plate,\n description=sample_ids, sample_proj=self.run_name,\n lanes=self.lanes, sep=',')\n\n contacts = {c.name: c.email for c in self.contacts}\n pi = self.principal_investigator\n principal_investigator = {pi.name: pi.email}\n sample_sheet_dict = {\n 'comments': SequencingProcess._format_sample_sheet_comments(\n principal_investigator=principal_investigator,\n contacts=contacts),\n 'IEMFileVersion': '4',\n 'Investigator Name': pi.name,\n 'Experiment Name': self.experiment,\n 'Date': str(self.date),\n 'Workflow': 'GenerateFASTQ',\n 'Application': 'FASTQ Only',\n 'Assay': self.assay,\n 'Description': '',\n 'Chemistry': 'Default',\n 'read1': self.fwd_cycles,\n 'read2': self.rev_cycles,\n 'ReverseComplement': '0',\n 'data': data}\n return SequencingProcess._format_sample_sheet(sample_sheet_dict)", "def _start_torque_workers(self):\n for bundle in self._model.batch_get_bundles(state=State.STAGED, bundle_type='run'):\n resource_args = []\n\n request_cpus = self._compute_request_cpus(bundle)\n if request_cpus:\n resource_args.extend(['-l', 'nodes=1:ppn=%d' % request_cpus])\n\n request_memory = self._compute_request_memory(bundle)\n if request_memory:\n resource_args.extend(['-l', 'mem=%d' % request_memory])\n\n request_queue = bundle.metadata.request_queue or self._default_request_queue\n if request_queue:\n # Either host=<host-name> or <queue-name>, but not tag=<tag>\n m = re.match('host=(.+)', request_queue)\n tagm = re.match('tag=.+', request_queue)\n if m:\n resource_args.extend(['-l', 'host=' + m.group(1)])\n elif not tagm:\n resource_args.extend(['-q', request_queue])\n\n request_priority = bundle.metadata.request_priority or self._default_request_priority\n if request_priority:\n resource_args.extend(['-p', str(request_priority)])\n\n script_args = [\n '--server', self._torque_bundle_service_url,\n '--password-file', self._torque_password_file,\n '--shared-file-system',\n ]\n\n script_env = {\n 'LOG_DIR': self._torque_log_dir,\n 'WORKER_CODE_DIR': self._torque_worker_code_dir,\n # -v doesn't work with spaces, so we have to hack it.\n 'WORKER_ARGS': '|'.join(script_args),\n }\n\n command = self._torque_ssh_command(\n ['qsub',\n '-k', 'n', # do not keep stdout/stderr streams (we redirect them manually to the configured log_dir)\n '-d', '/tmp', # avoid chdir permission problems, worker won't do anything in working directory anyway\n '-v', ','.join([k + '=' + v for k, v in script_env.iteritems()])] +\n resource_args +\n ['-S', '/bin/bash', os.path.join(self._torque_worker_code_dir, 'worker.sh')])\n\n # Throttle Torque commands, sometimes scheduler has trouble keeping up\n elapsed = time.time() - self._last_qsub_time\n if elapsed < 
self._torque_min_seconds_between_qsub:\n time.sleep(self._torque_min_seconds_between_qsub - elapsed)\n\n try:\n job_handle = subprocess.check_output(command, stderr=subprocess.STDOUT).strip()\n except subprocess.CalledProcessError as e:\n failure_message = 'Failed to launch Torque job: ' + e.output\n logger.info('Failing %s: %s', bundle.uuid, failure_message)\n self._model.update_bundle(\n bundle, {'state': State.FAILED,\n 'metadata': {'failure_message': failure_message}})\n continue\n finally:\n self._last_qsub_time = time.time()\n\n logger.info('Started Torque worker for bundle %s, job handle %s', bundle.uuid, job_handle)\n self._model.set_waiting_for_worker_startup_bundle(bundle, job_handle)", "def test_run_pick_closed_reference_otus_parallel(self):\r\n run_pick_closed_reference_otus(\r\n self.test_data['seqs'][0],\r\n self.test_data['refseqs'][0],\r\n self.test_out,\r\n self.test_data['refseqs_tax'][0],\r\n call_commands_serially,\r\n self.params,\r\n self.qiime_config,\r\n parallel=True,\r\n status_update_callback=no_status_updates)\r\n\r\n input_file_basename = splitext(split(self.test_data['seqs'][0])[1])[0]\r\n otu_map_fp = join(self.test_out, 'uclust_ref_picked_otus',\r\n '%s_otus.txt' % input_file_basename)\r\n otu_table_fp = join(self.test_out, 'otu_table.biom')\r\n otu_table = parse_biom_table(open(otu_table_fp, 'U'))\r\n expected_sample_ids = ['f1', 'f2', 'f3', 'f4', 'p1', 'p2', 't1', 't2']\r\n self.assertItemsEqual(otu_table.SampleIds, expected_sample_ids)\r\n\r\n # Number of OTUs matches manually confirmed result\r\n otu_map_lines = list(open(otu_map_fp))\r\n num_otus = len(otu_map_lines)\r\n otu_map_otu_ids = [o.split()[0] for o in otu_map_lines]\r\n self.assertEqual(num_otus, 3)\r\n\r\n # parse the otu table\r\n otu_table = parse_biom_table(open(otu_table_fp, 'U'))\r\n expected_sample_ids = ['f1', 'f2', 'f3', 'f4', 'p1', 'p2', 't1', 't2']\r\n # sample IDs are as expected\r\n self.assertItemsEqual(otu_table.SampleIds, expected_sample_ids)\r\n # otu ids are as expected\r\n self.assertItemsEqual(otu_table.ObservationIds, otu_map_otu_ids)\r\n\r\n # expected number of sequences in OTU table\r\n number_seqs_in_otu_table = sum([v.sum()\r\n for v in otu_table.iterSampleData()])\r\n self.assertEqual(number_seqs_in_otu_table, 117)\r\n\r\n # One tax assignment per otu\r\n self.assertEqual(len(otu_table.ObservationMetadata), 3)\r\n\r\n # Check that the log file is created and has size > 0\r\n log_fp = glob(join(self.test_out, 'log*.txt'))[0]\r\n self.assertTrue(getsize(log_fp) > 0)", "def execute(self, sample_files: pd.DataFrame, reference_file: Path, ncores: int = 1) -> ExecutorResults:\n pass", "def test_build_reference_items(self):\n items = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]\n obs1, obs2 = build_reference(items, 3)\n self.assertEqual(len(obs1), 3)\n self.assertEqual(len(obs2), 7)\n #check that the ref and nonref are same\n for num in obs1:\n if num not in items or num in obs2:\n raise AssertionError(str(num) + \" not expected in ref!\")\n for num in obs2:\n if num not in items or num in obs1:\n raise AssertionError(str(num) + \" not expected in nonref!\")\n for num in items:\n if num not in obs1 and num not in obs2:\n raise AssertionError(str(num) + \" not observed!\")", "def create_correlation_test_docs():\n n_materials = 200\n pnstore = MongograntStore(\"ro:mongodb03.nersc.gov/propnet\", \"propnet_july2019\")\n pnstore.connect()\n cursor = pnstore.query(\n criteria={'$and': [\n {'$or': [{p: {'$exists': True}},\n {'inputs.symbol_type': p}]}\n for p in PROPNET_PROPS]},\n 
properties=['task_id', 'inputs'] +\n [p + '.mean' for p in PROPNET_PROPS] +\n [p + '.units' for p in PROPNET_PROPS] +\n [p + '.quantities' for p in PROPNET_PROPS])\n data = []\n for item in cursor:\n if len(data) < n_materials:\n data.append(item)\n else:\n cursor.close()\n break\n dumpfn(data, os.path.join(CORR_TEST_DIR, \"correlation_propnet_data.json\"))", "def multi_bamqc(samplelist, bamqc_waitlist, bamqcdir, format='PDF', **other_qsub_options):\n modules = pmgctools.check_vars(['qualimap'])\n\n # create sample list\n names = []\n samplelist_file = os.path.join(bamqcdir, 'samplelist')\n with open(samplelist_file, 'w') as f:\n for sample in samplelist:\n names.append(sample)\n print('{}\\t{}'.format(sample, os.path.join(bamqcdir, 'tmp', sample)), file=f)\n\n name = '__'.join(names)[:200]\n multi_dir = os.path.join(bamqcdir, 'tmp', 'multi_bamqc')\n cmd = \"qualimap multi-bamqc -d {} -outdir {}\".format(samplelist_file, multi_dir)\n if format.upper() == 'HTML':\n cmd += '\\nmv {} {}\\n'.format(multi_dir, os.path.join(bamqcdir, name + '_bamqc'))\n else:\n cmd += ' -outformat pdf'\n cmd += '\\nmv {} {}\\n'.format(os.path.join(multi_dir, 'report.pdf'),\n os.path.join(bamqcdir, name + '_bamqc.pdf'))\n\n cmd += '''while read line; do sample=( $line ); echo ${{sample[0]}}>>{output}/coverage.txt; echo ---->>{output}/coverage.txt; grep 'mean coverageData\\|std coverageData' ${{sample[1]}}/genome_results.txt>>{output}/coverage.txt; done<{output}/samplelist; rm -rf {output}/tmp; rm {output}/samplelist'''.format(output=bamqcdir)\n\n return qsub.qsub('bamqc_' + name, cmd, modules=modules, waitlist=','.join(bamqc_waitlist), other='cpu=1|mem={}|walltime=2:00:00'.format('16gb'), **other_qsub_options)", "def main(target_clusters, clusters_per_lane, project_id, dest_plate_list, allow_non_dupl_struct): \n couch = connection()\n structure = proj_struct(couch, project_id, target_clusters)\n [lane_maps, clusters_rem, clusters_expr] = parse_indata(structure, target_clusters)\n if allow_non_dupl_struct:\n aggregator(lane_maps,clusters_rem,clusters_per_lane)\n else:\n simple_unique_set(lane_maps)\n [ideal_ratios, req_lanes, total_lanes] = sample_distributor(lane_maps, clusters_rem, clusters_per_lane)\n acc_ratios = correct_numbers(lane_maps, clusters_expr, ideal_ratios, req_lanes, total_lanes)\n generate_output(project_id, dest_plate_list, total_lanes, req_lanes, lane_maps, acc_ratios)", "def simulateArrivalOfJobs(env, processes, batchQ):\n for p in processes:\n batchQ.addToBq(p)", "def simulateArrivalOfJobs(env, processes, batchQ):\n for p in processes:\n batchQ.addToBq(p)", "def get(self):\n for job in data_types.Job.query():\n if not utils.string_is_true(job.get_environment().get('CORPUS_PRUNE')):\n continue\n\n latest_revision = _get_latest_job_revision(job)\n if not latest_revision:\n continue\n\n queue = tasks.queue_for_job(job.name)\n for target_job in fuzz_target_utils.get_fuzz_target_jobs(job=job.name):\n tasks.add_task(\n 'corpus_pruning',\n '%s@%s' % (target_job.fuzz_target_name, latest_revision),\n job.name,\n queue=queue)", "def runProjectionMatching(self, iterN, refN, args, **kwargs):\n projMatchRootName = self._getFileName('projMatchRootNames', iter=iterN, ref=refN)\n refname = self._getFileName('projectLibraryStk', iter=iterN, ref=refN)\n \n numberOfCtfGroups = self.numberOfCtfGroups.get()\n# ctfGroupName = self._getPath(self.ctfGroupDirectory, '%(ctfGroupRootName)s')\n #remove output metadata\n cleanPath(projMatchRootName)\n \n for ctfN in reversed(list(self.allCtfGroups())):\n 
self._log.info('CTF group: %d/%d' % (ctfN, numberOfCtfGroups))\n ctfArgs = ' -i %(inputdocfile)s -o %(outputname)s --ref %(refname)s' \n \n inputdocfile = self._getBlockFileName(ctfBlockName, ctfN, self.docFileInputAngles[iterN-1])\n outputname = self._getBlockFileName(ctfBlockName, ctfN, projMatchRootName)\n baseTxtFile = removeExt(refname)\n neighbFile = baseTxtFile + '_sampling.xmd'\n cleanPath(neighbFile)\n neighbFileb = baseTxtFile + '_group' + str(ctfN).zfill(self.FILENAMENUMBERLENGTH) + '_sampling.xmd'\n copyFile(neighbFileb, neighbFile)\n print \"copied file \", neighbFileb, \"to\", neighbFile\n \n threads = self.numberOfThreads.get()\n trhArgs = ' --mem %(mem)s --thr %(thr)s'\n thrParams = {\n 'mem' : self.availableMemory.get() * threads,\n 'thr' : threads,\n }\n \n if self.doCTFCorrection and self._referenceIsCtfCorrected[iterN]:\n ctfArgs += ' --ctf %s' % self._getBlockFileName('', ctfN, self._getFileName('stackCTFs'))\n \n progArgs = ctfArgs % locals() + args + trhArgs % thrParams\n self.runJob('xmipp_angular_projection_matching', progArgs, **kwargs)", "def run(self):\n \n for i, spl in enumerate(self.sampleList):\n id_ =MSIdentificationModel(spl, **self.parameters)\n id_.identification(self.models[i], error=5)", "def setUp(self):\n self.data_dir_qucs = os.path.dirname(os.path.abspath(__file__)) + \\\n '/qucs_prj/'\n self.data_dir_ads = os.path.dirname(os.path.abspath(__file__)) + \\\n '/ads/'\n\n self.ref_qucs = [\n {'model': 'hammerstadjensen', 'disp': 'hammerstadjensen', 'color': 'r',\n 'n': rf.Network(os.path.join(self.data_dir_qucs,\n 'mline,hammerstad,hammerstad.s2p'))},\n {'model': 'hammerstadjensen', 'disp': 'kirschningjansen', 'color': 'c',\n 'n': rf.Network(os.path.join(self.data_dir_qucs,\n 'mline,hammerstad,kirschning.s2p'))},\n {'model': 'hammerstadjensen', 'disp': 'kobayashi', 'color': 'k',\n 'n': rf.Network(os.path.join(self.data_dir_qucs,\n 'mline,hammerstad,kobayashi.s2p'))},\n {'model': 'hammerstadjensen', 'disp': 'yamashita', 'color': 'g',\n 'n': rf.Network(os.path.join(self.data_dir_qucs,\n 'mline,hammerstad,yamashita.s2p'))},\n {'model': 'wheeler', 'disp': 'schneider', 'color': 'm',\n 'n': rf.Network(os.path.join(self.data_dir_qucs,\n 'mline,wheeler,schneider.s2p'))},\n {'model': 'schneider', 'disp': 'schneider', 'color': 'b',\n 'n': rf.Network(os.path.join(self.data_dir_qucs,\n 'mline,schneider,schneider.s2p'))}\n ]\n\n\n self.ref_ads = [\n {'diel': 'frequencyinvariant', 'disp': 'kirschningjansen', 'color': 'r',\n 'n': rf.Network(os.path.join(self.data_dir_ads,\n 'mlin,freqencyinvariant,kirschning.s2p'))},\n {'diel': 'djordjevicsvensson', 'disp': 'kirschningjansen', 'color': 'c',\n 'n': rf.Network(os.path.join(self.data_dir_ads,\n 'mlin,djordjevicsvensson,kirschning.s2p'))},\n {'diel': 'frequencyinvariant', 'disp': 'kobayashi', 'color': 'k',\n 'n': rf.Network(os.path.join(self.data_dir_ads,\n 'mlin,freqencyinvariant,kobayashi.s2p'))},\n {'diel': 'djordjevicsvensson', 'disp': 'kobayashi', 'color': 'g',\n 'n': rf.Network(os.path.join(self.data_dir_ads,\n 'mlin,djordjevicsvensson,kobayashi.s2p'))},\n {'diel': 'frequencyinvariant', 'disp': 'yamashita', 'color': 'm',\n 'n': rf.Network(os.path.join(self.data_dir_ads,\n 'mlin,freqencyinvariant,yamashita.s2p'))},\n {'diel': 'djordjevicsvensson', 'disp': 'yamashita', 'color': 'b',\n 'n': rf.Network(os.path.join(self.data_dir_ads,\n 'mlin,djordjevicsvensson,yamashita.s2p'))}\n ]\n\n # default parameter set for tests\n self.verbose = False # output comparison plots if True\n self.w = 3.00e-3\n self.h = 1.55e-3\n 
self.t = 35e-6\n self.l = 25e-3\n self.ep_r = 4.413\n self.tand = 0.0182\n self.rho = 1.7e-8\n self.d = 0.15e-6\n self.f_et = 1e9", "def test_call_ref_only(self):\r\n\r\n fd, tmp_otu_filepath = mkstemp(\r\n prefix='ReferenceRepSetPickerTest_',\r\n suffix='.otu')\r\n close(fd)\r\n otu_file = open(tmp_otu_filepath, 'w')\r\n otu_file.write(otus_all_ref)\r\n otu_file.close()\r\n self.files_to_remove.append(tmp_otu_filepath)\r\n\r\n exp = {'ref1': ('ref1', 'GGGGGGGAAAAAAAAAAAAA'),\r\n 'ref0': ('ref0', 'CCCAAAAAAATTTTTT')}\r\n\r\n # passing only reference (not input seqs)\r\n app = ReferenceRepSetPicker(params={'Algorithm': 'first',\r\n 'ChoiceF': first_id})\r\n obs = app(None,\r\n tmp_otu_filepath,\r\n self.ref_seq_filepath)\r\n self.assertEqual(obs, exp)\r\n\r\n # passing reference and input seqs\r\n app = ReferenceRepSetPicker(params={'Algorithm': 'first',\r\n 'ChoiceF': first_id})\r\n obs = app(self.tmp_seq_filepath,\r\n tmp_otu_filepath,\r\n self.ref_seq_filepath)\r\n self.assertEqual(obs, exp)", "def test_call_detects_reference_chimeras(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n # Should detect and remove chimeric sequence based\r\n # during ref based detection\r\n\r\n exp_otu_ids = ['0', '1']\r\n\r\n exp_clusters = [['Solemya', 'Solemya_seq2'],\r\n ['usearch_ecoli_seq', 'usearch_ecoli_seq2']\r\n ]\r\n\r\n app = UsearchReferenceOtuPicker(\r\n params={'save_intermediate_files': False,\r\n 'db_filepath':\r\n self.tmp_ref_database,\r\n 'output_dir': self.temp_dir,\r\n 'remove_usearch_logs': True,\r\n 'reference_chimera_detection':\r\n True,\r\n 'de_novo_chimera_detection':\r\n False,\r\n 'cluster_size_filtering':\r\n False,\r\n 'minlen': 12,\r\n 'w': 12,\r\n 'minsize': 1,\r\n 'percent_id': 0.97,\r\n 'percent_id_err': 0.97,\r\n 'abundance_skew': 2\r\n })\r\n\r\n obs = app(self.tmp_seq_filepath2, self.tmp_otu_ref_database)\r\n\r\n obs_otu_ids = sorted(obs.keys())\r\n obs_clusters = sorted(obs.values())\r\n # The relation between otu ids and clusters is abitrary, and\r\n # is not stable due to use of dicts when parsing clusters -- therefore\r\n # just checks that we have the expected group of each\r\n self.assertEqual(obs_otu_ids, exp_otu_ids)\r\n self.assertEqual(obs_clusters, exp_clusters)", "def buildWorkload(self):\n self.commonWorkload()\n prodTask = self.workload.newTask(\"PrivateMC\")\n\n self.workload.setWorkQueueSplitPolicy(\"MonteCarlo\",\n self.analysisJobSplitAlgo,\n self.analysisJobSplitArgs)\n self.workload.setEndPolicy(\"SingleShot\")\n\n outputMods = self.setupProcessingTask(prodTask, \"PrivateMC\", None,\n couchURL = self.couchURL,\n couchDBName = self.couchDBName,\n configCacheUrl = self.configCacheUrl,\n configDoc = self.configCacheID,\n splitAlgo = self.analysisJobSplitAlgo,\n splitArgs = self.analysisJobSplitArgs,\n seeding = self.seeding,\n totalEvents = self.totalEvents,\n userSandbox = self.userSandbox,\n userFiles = self.userFiles)\n\n self.setUserOutput(prodTask)\n\n # Pileup configuration for the first generation task\n self.pileupConfig = parsePileupConfig(self.mcPileup, self.dataPileup)\n\n # Pile up support\n if self.pileupConfig:\n self.setupPileup(prodTask, self.pileupConfig)\n \n # setting the parameters which need to be set for all the tasks\n # sets acquisitionEra, processingVersion, processingString\n self.workload.setTaskPropertiesFromWorkload()\n\n # set the LFN bases (normally done by request manager)\n # also pass runNumber (workload evaluates it)\n self.workload.setLFNBase(self.mergedLFNBase, 
self.unmergedLFNBase,\n runNumber = self.runNumber)\n\n return self.workload" ]
[ "0.6475256", "0.55186105", "0.50689244", "0.5050482", "0.501838", "0.50102156", "0.49754617", "0.493003", "0.48858607", "0.48584223", "0.4837625", "0.48150113", "0.48086077", "0.47463083", "0.4714161", "0.47113824", "0.47051105", "0.47018775", "0.46838635", "0.4679192", "0.46769616", "0.4666678", "0.4666678", "0.46611026", "0.46539745", "0.46503946", "0.4648414", "0.46428192", "0.46423095", "0.4625418" ]
0.568913
1
Set which boxes are visible.
def set_visible_boxes(self, idx_box_vis):
    if not self.variable_vis:
        raise ValueError('Variable visibility must be enabled via "variable_vis"')

    # Find which boxes are now visible that weren't last update of 'self.visible_boxes',
    # and vice-versa
    newbox_vis = np.setdiff1d(idx_box_vis, self.visible_boxes)
    oldbox_vis = np.setdiff1d(self.visible_boxes, idx_box_vis)

    # Get the new visible vertices indexes for the faces (mesh) and outlines (border)
    # and the new invisible vertices indexes
    idx_face_vis = np.ravel(self.box_to_face[newbox_vis])
    idx_outl_vis = np.ravel(self.box_to_outl[newbox_vis])
    idx_face_invis = np.ravel(self.box_to_face[oldbox_vis])
    idx_outl_invis = np.ravel(self.box_to_outl[oldbox_vis])

    # Update mesh visibility bool array
    self.mesh.set_visible_faces(idx_face_vis)
    self.mesh.set_invisible_faces(idx_face_invis)
    self.mesh.update_vis_buffer()

    # Update border visibility bool array
    self.border.set_visible_faces(idx_outl_vis)
    self.border.set_invisible_faces(idx_outl_invis)
    self.border.update_vis_buffer()

    # Update 'self.visible_boxes'
    self.visible_boxes = idx_box_vis
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_visible(self, state: bool):\n self.box.set_visible(state)\n if not state:\n self.add_box.set_visible(False)", "def show_box(self, coor):\n\n for i in range(self.boxes.shape[0]):\n for j in range(self.boxes.shape[0]):\n self.boxes[i, j].set_visible(False)\n\n box = self.boxes[coor]\n isVisible = box.get_visible()\n box.set_visible(not isVisible)\n\n self.fig.canvas.draw()", "def visible_boxes(self):\n if not self.variable_vis:\n raise ValueError('Variable visibility must be enabled via \"variable_vis\".')\n return self._visible_boxes", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def set_visible(self, value):\n for artist in self.artists:\n artist.set_visible(value)", "def set_visible(self, visible):\n self._visible = visible\n for artist in self.artists:\n artist.set_visible(visible)", "def __update_visible(self) -> None:\n for i in range(0, 8):\n visible_row = self.__row_position + Labyrinth.ALL_ROW_MOVE[i]\n visible_col = self.__col_position + Labyrinth.ALL_COL_MOVE[i]\n if 0 <= visible_row < self.__labyrinth.labyrinth_height and \\\n 0 <= visible_col < self.__labyrinth.labyrinth_width:\n self.__labyrinth.visible_cells[visible_row][visible_col] = 1", "def _set_boxes(self, listOfBoxes):\n self._boxes = listOfBoxes", "def hideallstate(self):\n if self.hideallcheck.isChecked() == True:\n self.field.setOwnRobotsVisibility(False, self.index)\n self.field.setPathVisibility(False, self.index)\n self.field.setBallVisibility(False, self.index)\n self.field.setTeammateVisibility(False, self.index)\n #self.field.setPathVisibility(False, self.index)\n self.field.setOpponentVisibility(False, self.index)\n self.field.setUndefVisibility(False, self.index)\n self.ballcheck.setChecked(False)\n self.teammatecheck.setChecked(False)\n self.opponentcheck.setChecked(False)\n self.undefcheck.setChecked(False)\n self.targetcheck.setChecked(False)\n else:\n self.field.setOwnRobotsVisibility(True, self.index)\n self.field.setPathVisibility(True, self.index)\n self.field.setBallVisibility(True, self.index)\n self.field.setTeammateVisibility(True, self.index)\n #self.field.setPathVisibility(True, self.index)\n self.field.setOpponentVisibility(True, self.index)\n self.field.setUndefVisibility(True, self.index)\n self.ballcheck.setChecked(True)\n self.teammatecheck.setChecked(True)\n self.opponentcheck.setChecked(True)\n self.undefcheck.setChecked(True)\n self.targetcheck.setChecked(True)", "def visible(self, show):", "def set_visible(self, visible):\n self.widget.setVisible(visible)", "def set_visible(self, visible):\n self.widget.setVisible(visible)", "def grid_visibility(self, is_visible):\n self.__graphics_grid.set_visibility(is_visible)", "def grid_visibility(self, is_visible):\n self.__graphics_grid.set_visibility(is_visible)", "def _visibleChannels_changed(self):\n for i in range(0,8):\n if i in self.visibleChannels:\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=True\n else:\n print i\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=False", "def __grid_visibility_checkbox(self, c):\n self.grid_visibility(c.checked)\n self.__grid_visibility = c.checked", "def __grid_visibility_checkbox(self, c):\n self.grid_visibility(c.checked)\n self.__grid_visibility = c.checked", "def setGridVisible(self,visible=True):\n for line in self.items():\n if 
isinstance(line, QGraphicsLineItem):\n # ignore arrow\n if not hasattr(line, 'arrowHead'):\n line.setVisible(visible)", "def _set_show_hide_products(self):\n \n visible_count = 0\n\n for (counter, product) in enumerate(self.product_displays):\n\n if (counter < self.product_displays.top_index):\n # Hide all the products above the list product top\n product.set_visible(False)\n elif visible_count < self.limits.screen_products:\n # Show screen products based on their quantity\n product.visible = True\n visible_count += 1\n else:\n # Hide products below list bottom\n product.set_visible(False)", "def visible(self, visible):\n\n self._visible = visible" ]
[ "0.6871116", "0.6863543", "0.68359274", "0.68055904", "0.68055904", "0.68055904", "0.68055904", "0.68055904", "0.68055904", "0.68055904", "0.68055904", "0.68055904", "0.68055904", "0.68055904", "0.6683516", "0.663565", "0.65381724", "0.63933444", "0.63039213", "0.6302516", "0.62957644", "0.62957644", "0.6268735", "0.6268735", "0.62353605", "0.6142146", "0.6142146", "0.6104462", "0.6097313", "0.60189974" ]
0.72460145
0
Array of indexes of boxes that are currently visible.
def visible_boxes(self):
    if not self.variable_vis:
        raise ValueError('Variable visibility must be enabled via "variable_vis".')
    return self._visible_boxes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def activeChildWellIndices(self):\n return self._activeWellIndices", "def get_34index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==1]", "def indices(self):\n slice_list = []\n for axis in range(self.ndim):\n if axis in self.displayed:\n slice_list.append(slice(None))\n else:\n if self.clip:\n p = np.clip(\n self.point[axis],\n np.round(self.range[axis][0]),\n np.round(self.range[axis][1]) - 1,\n )\n else:\n p = self.point[axis]\n p = np.round(p / self.range[axis][2]).astype(int)\n slice_list.append(p)\n return tuple(slice_list)", "def _get_boxes(self):\n return self._boxes", "def box_to_indices(box):\r\n start_row = (box // 3) * 3\r\n start_col = (box % 3) * 3\r\n return [(row, col) for row in range(start_row, start_row + 3)\r\n for col in range(start_col, start_col + 3)]", "def open_spots(self):\n ret = []\n for i in range(1,25):\n if self.nodes[i].piece == None:\n ret.append(i)\n return ret", "def get_visible_cells(self):\r\n ux, uy = self.GetScrollPixelsPerUnit()\r\n sx, sy = self.GetViewStart()\r\n w, h = self.GetGridWindow().GetClientSize().Get()\r\n sx *= ux\r\n sy *= uy\r\n start_col = self.XToCol(sx)\r\n start_row = self.YToRow(sy)\r\n end_col = self.XToCol(sx + w, True)\r\n end_row = self.YToRow(sy + h, True)\r\n return start_row, end_row, start_col, end_col", "def available_positions(self):\n available_positions = []\n for i in range(self.positions_count):\n if self.board[i] == 0:\n available_positions.append(i+1)\n return available_positions", "def get_neighbours(self):\n shape=self.cubeshape[1:]\n neighboursx=np.arange(self.xpos-(self.blocksize-1)/2,(self.xpos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursx=[x if (x>=0) & (x<=shape[1]-1) else np.nan for x in neighboursx ]\n neighboursy=np.arange(self.ypos-(self.blocksize-1)/2,(self.ypos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursy=[y if (y>=0) & (y<=shape[0]-1) else np.nan for y in neighboursy ]\n keys=[np.ravel_multi_index([y,x], shape) if np.all(np.isfinite(np.asarray([y,x]))) else np.nan for y in neighboursy for x in neighboursx]\n\n return keys", "def occ_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==1:\n indices.append(index)\n return indices", "def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices", "def get_played_positions(board):\n return np.argwhere(board.state != -1)", "def get_5index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==0]", "def active_boards(self):\n return [(i, j) for i in range(self.SIZE) for j in range(self.SIZE)\n if self.boards[i][j].state == State.IN_PROGRESS]", "def get_checked_indices(self) -> List[int]:\n checked = list()\n for i in range(self.rowCount()):\n if bool(self.item(i, 0).checkState()):\n checked.append(i)\n\n return checked", "def rectangleindices(self):\n return {r.n for r in self.rectangles}", "def get_active_register_indices(self):\n assert self.sketch.ndim == 1, 'Currently only support 1-dimensional sketch.'\n return np.flatnonzero(self.sketch)", "def indices(self):\n return range(len(self))", "def childWellIndices(self):\n return self._wellIndices", "def get_checkbox_coordinates():\n boxes = []\n current_y = CHECKBOX_TOP_Y_START\n for _ in range(NUM_CHECKBOXES):\n top = current_y\n bottom = top + CHECKBOX_HEIGHT - 1\n left = CHECKBOX_LEFT_X_START\n right = CHECKBOX_RIGHT_X_END\n boxes.append((left, right, bottom, top))\n current_y += CHECKBOX_INTERTOP_DISTANCE\n return boxes", "def 
itervisible(self):\r\n return (x for x in self.iterall() if x.visible)", "def get_boxes(self) -> List[Box]:\n return [Box.from_npbox(npbox) for npbox in self.boxlist.get()]", "def get_live_cell_coordinates(self):\n\n return np.array(np.where(self.board == 1)).transpose().tolist()", "def get_block_positions(self, fig):\n block_positions = []\n\n # Iterates through y + active_piece.y and x + active_piece.x\n for y, row in enumerate(fig, start=self.active_piece.y):\n for x, val in enumerate(row, start=self.active_piece.x):\n if val != 0:\n block_positions.append((x, y))\n\n return block_positions", "def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]", "def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]", "def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]", "def getLandmarkindices(self):\n return self.subsetnodes_indices", "def cell_list(self):\n lst_of_idx = []\n height = self.__height\n width = self.__width\n for i in range(width):\n for j in range(height):\n lst_of_idx.append((i,j))\n lst_of_idx.append((3,7))\n return lst_of_idx", "def empty_spots(self):\n\t\tret = []\n\t\tfor i in range(0, self.size):\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tif(self.grid[i][j] == self.terminal):\n\t\t\t\t\tret.append((i,j))\n\t\treturn ret" ]
[ "0.667127", "0.66098076", "0.649953", "0.64284825", "0.6357638", "0.634663", "0.6343066", "0.62989444", "0.62894857", "0.62763494", "0.6264251", "0.62637955", "0.625783", "0.62535286", "0.62453705", "0.619242", "0.6191503", "0.6182959", "0.61586684", "0.6135255", "0.6127939", "0.6122209", "0.61067086", "0.6078467", "0.60387754", "0.60387754", "0.60387754", "0.6025207", "0.6019155", "0.6017842" ]
0.67173785
0
The vispy.visuals.MeshVisual that is used to fill in.
def mesh(self):
    return self._mesh
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mesh(self):\n self._ensure_mesh()\n return self._mesh", "def get_mesh(self):\n return self.mesh", "def getMesh(self):\n return self.mesh", "def show_mesh(self):\n g = self.build_gmsh()\n if g:\n mesh = cfm.GmshMesh(g)\n mesh.el_type = self.el_type\n\n mesh.dofs_per_node = self.dofs_per_node\n mesh.el_size_factor = self.el_size_factor\n self.mesh = mesh\n\n coords, edof, dofs, bdofs, elementmarkers = mesh.create()\n cfv.clf()\n\n cfv.draw_mesh(\n coords=coords,\n edof=edof,\n dofs_per_node=mesh.dofs_per_node,\n el_type=mesh.el_type,\n filled=True\n )\n if self.figure_canvas is not None:\n self.figure_canvas.draw()\n else:\n cfv.show_and_wait()\n return None\n else:\n return \"Canceled\"", "def get_mesh(self):\n tsdf_vol, color_vol = self.get_volume()\n\n # Marching cubes\n verts, faces, norms, vals = measure.marching_cubes_lewiner(tsdf_vol, level=0)\n verts_ind = np.round(verts).astype(int)\n verts = verts * self._voxel_size + self._vol_origin # voxel grid coordinates to world coordinates\n\n # Get vertex colors\n rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]\n colors_b = np.floor(rgb_vals / self._color_const)\n colors_g = np.floor((rgb_vals - colors_b * self._color_const) / 256)\n colors_r = rgb_vals - colors_b * self._color_const - colors_g * 256\n colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T\n colors = colors.astype(np.uint8)\n return verts, faces, norms, colors", "def draw(self, color=None):\n color = color or self.color\n vertices = [list(vertex) for vertex in self.shape.vertices]\n faces = self.shape.faces\n mesh = compas_ghpython.draw_mesh(vertices,\n faces,\n color=color)\n return mesh", "def __init__(self, mesh):\n self._mesh = mesh", "def _final_mesh(self):\n assert (\n \"final_mesh\" in self.__dict__.keys()\n ), \"Final Mesh does not exist yet - please run multi-view optimization before getting\"\n return self.final_mesh", "def mesh(self, initial_obj=None):\n mesh = initial_obj if initial_obj else 0.\n for size in reversed(self.sizes):\n mesh = [mesh] * size\n return mesh", "def show(self):\n # if a blender object already exists then the mesh_grower must have been finalized;\n # in this case make a new mesh_grower and object\n viz.add_box(self.lower_vertex, self.upper_vertex)", "def _mesh(self):\n from scipy.spatial import Delaunay\n points = self.cluster.get_positions()\n delaunay = Delaunay(points)\n simplices = self._filter_max_dist_in_element(delaunay.simplices)\n delaunay.simplices = simplices\n return delaunay", "def CreateDummyLowerDimensionalMesh(self):\n\n\n sys.stdout = open(os.devnull, \"w\")\n p = self.InferPolynomialDegree()\n mesh = Mesh()\n if self.element_type == \"tet\":\n mesh.Rectangle(nx=1,ny=1, element_type=\"tri\")\n mesh.GetHighOrderMesh(p=p)\n elif self.element_type == \"hex\":\n mesh.Rectangle(nx=1,ny=1, element_type=\"quad\")\n mesh.GetHighOrderMesh(p=p)\n elif self.element_type == \"tri\" or self.element_type == \"quad\":\n mesh.Line(n=1, p=p)\n elif self.element_type == \"line\":\n mesh.element_type = \"point\"\n mesh.nelem = 1\n mesh.nnode = 1\n mesh.degree = p\n mesh.elements = np.array([[0]])\n mesh.points = np.array([[0.,0.,0.]])\n sys.stdout = sys.__stdout__\n\n return mesh", "def test_plot_mesh(self):\n plt.close('all')\n\n #\n # Initialize\n #\n fig, ax = plt.subplots(3,3)\n plot = Plot()\n #\n # Define mesh\n # \n mesh = Mesh.newmesh(grid_size=(2,2))\n mesh.refine() \n mesh.root_node().children[1,1].mark(1)\n mesh.refine(1)\n \n # Plot simple mesh\n ax[0,0] = plot.mesh(ax[0,0], mesh)\n \n #\n 
# Flag a few cells\n # \n mesh.unmark(nodes=True)\n mesh.root_node().children[0,0].mark(2)\n mesh.root_node().children[1,0].mark(1)\n mesh.root_node().children[1,1].children['SW'].mark(3)\n mesh.root_node().children[1,1].children['NE'].mark(3)\n \n # Color flagged cells\n ax[0,1] = plot.mesh(ax[0,1], mesh, color_marked=[1,2,3], nested=True)\n \n # Plot vertex numbers\n ax[0,2] = plot.mesh(ax[0,2], mesh, vertex_numbers=True)\n \n # Plot edge numbers\n ax[1,0] = plot.mesh(ax[1,0], mesh, edge_numbers=True)\n \n # Plot cell numbers nested off\n mesh.refine(2)\n ax[1,1] = plot.mesh(ax[1,1], mesh, cell_numbers=True)\n \n # Plot cell numbers nested on\n ax[1,2] = plot.mesh(ax[1,2], mesh, cell_numbers=True, nested=True)\n\n # Plot dofs\n element = QuadFE(2,'Q1')\n ax[2,0] = plot.mesh(ax[2,0], mesh, element=element, dofs=True)\n \n # Assign dofs in a nested way\n ax[2,1] = plot.mesh(ax[2,1], mesh, element=element, dofs=True, \\\n nested=True)\n \n # Display only dofs of flagged nodes \n ax[2,2] = plot.mesh(ax[2,2], mesh, element=element, dofs=True, \\\n node_flag=3, nested=True, show_axis=True)", "def __init__(self, mesh: Mesh):\n self.mesh = mesh\n self.children = []", "def __init__(self, mesh: Mesh):\n self.mesh = mesh\n self.f = [0]*len(mesh.delaunay.simplices)", "def CreateDummyUpperDimensionalMesh(self):\n\n\n sys.stdout = open(os.devnull, \"w\")\n p = self.InferPolynomialDegree()\n mesh = Mesh()\n if self.element_type == \"tri\":\n mesh.Parallelepiped(nx=1,ny=1,nz=1, element_type=\"tet\")\n mesh.GetHighOrderMesh(p=p)\n elif self.element_type == \"quad\":\n mesh.Parallelepiped(nx=1,ny=1,nz=1, element_type=\"hex\")\n mesh.GetHighOrderMesh(p=p)\n elif self.element_type == \"line\":\n mesh.Rectangle(nx=1,ny=1, element_type=\"quad\")\n mesh.GetHighOrderMesh(p=p)\n sys.stdout = sys.__stdout__\n\n return mesh", "def draw_stl_from_mesh(m):\n plt.ion()\n # Create a new plot\n figure = plt.figure()\n axes = mplot3d.Axes3D(figure)\n\n # Render the cube faces\n #for m in meshes:\n axes.add_collection3d(mplot3d.art3d.Poly3DCollection(m.vectors))\n\n # Auto scale to the mesh size\n scale = m.points.flatten(-1)\n axes.auto_scale_xyz(scale, scale, scale)", "def MeshMachine(main):\n\n # oDesign definition\n oDesign = main['ANSYS']['oDesign']\n\n # Data for the rotor mesh\n RotorName = main['ANSYS']['Rotor&Magnets']['Name'][0]\n RotorNumMaxElem = main['ANSYS']['Mesh']['Rotor']['NumMaxElem']\n RotorMaxLength = main['ANSYS']['Mesh']['Rotor']['MaxLength']\n\n # Data for the magnets mesh\n PMNames = main['ANSYS']['Rotor&Magnets']['PMNames']\n PMNumMaxElem = main['ANSYS']['Mesh']['Magnets']['NumMaxElem']\n PMMaxLength = main['ANSYS']['Mesh']['Magnets']['MaxLength']\n\n # Data for the Stator mesh\n StatorName = main['ANSYS']['Stator']['Name']\n StatorNormalDev = main['ANSYS']['Mesh']['Stator']['NormalDev']\n StatorAspectRatio = main['ANSYS']['Mesh']['Stator']['AspectRatio']\n\n # Data for the Stator mesh\n CoilNames = main['ANSYS']['Winding']['CoilNames']\n WindingNumMaxElem = main['ANSYS']['Mesh']['Winding']['NumMaxElem']\n WindingMaxLength = main['ANSYS']['Mesh']['Winding']['MaxLength']\n\n WindingName = []\n for phase in CoilNames:\n for direction in phase:\n WindingName += direction\n\n # Creating meshes\n oModule = oDesign.GetModule(\"MeshSetup\")\n\n # Rotor meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Rotor\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", [RotorName],\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(RotorNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", 
str(RotorMaxLength)+\"mm\"\n ]\n )\n # Magnet meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Magnets\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", PMNames,\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(PMNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(PMMaxLength)+\"mm\"\n ]\n )\n # Stator meshes\n oModule.AssignTrueSurfOp(\n [\n \"NAME:Stator\",\n \"Objects:=\", [StatorName],\n \"CurvedSurfaceApproxChoice:=\", \"ManualSettings\",\n \"SurfDevChoice:=\", 0,\n \"NormalDevChoice:=\", 2,\n \"NormalDev:=\", str(StatorNormalDev) + \"deg\",\n \"AspectRatioChoice:=\", 2,\n \"AspectRatio:=\", str(StatorAspectRatio)\n ]\n )\n\n # Coil meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Coils\",\n \"RefineInside:=\"\t, True,\n \"Enabled:=\"\t\t, True,\n \"Objects:=\"\t\t, WindingName,\n \"RestrictElem:=\"\t, False,\n \"NumMaxElem:=\"\t\t, str(WindingNumMaxElem),\n \"RestrictLength:=\"\t, True,\n \"MaxLength:=\"\t\t, str(WindingMaxLength) +\"mm\"\n ]\n )\n\n return main", "def createMesh(objname,Vert,Edges=[],Faces=[]):\n me = bpy.data.meshes.new(objname)\n ob = bpy.data.objects.new(objname,me)\n bpy.context.scene.objects.link(ob)\n \n me.from_pydata(Vert,Edges,Faces)\n me.update(calc_edges=True)", "def mesh(self):\n return numpy.meshgrid(*self.edges, indexing='ij')", "def validate_mesh(self):\n pass", "def build_mesh(self):\n vertices = []\n indices = []\n step = 10\n istep = (pi * 2) / float(step)\n for i in range(step):\n x = 350 + cos(istep * i) * 100\n y = 350 + sin(istep * i) * 100\n vertices.extend([x, y, 0, 0])\n indices.append(i)\n return Mesh(vertices=vertices, indices=indices)", "def copy(self):\r\n return BasicMesh(self.gl_lists, list(self.pos),\r\n list(self.rotation), list(self.verts),\r\n self.scale, list(self.colorize))", "def new_mesh_set(self, all_meshes):\n if isinstance(all_meshes, Mesh):\n mesh_tp = []\n mesh_tp.append(all_meshes)\n all_meshes = mesh_tp\n\n if not isinstance(all_meshes, list):\n raise TypeError(\"Please send a list of mesh to update_mesh\")\n self.all_meshes = all_meshes\n\n # Remove previous actors from the scene\n for actor in self.mesh_actors:\n self.parent_window.ren.RemoveActor(actor)\n self.mesh_actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtkPoints()\n for i, mesh in enumerate(self.all_meshes):\n if mesh.time.size != 1:\n raise IndexError(\"Mesh should be from one frame only\")\n\n points = vtkPoints()\n for j in range(mesh.channel.size):\n # points.InsertNextPoint([0, 0, 0])\n points.InsertNextPoint(mesh.data[:3, j, 0].tolist())\n\n # Create an array for each triangle\n draw_patch = not mesh.automatic_triangles and not self.force_wireframe\n if draw_patch:\n poly_type = vtkPolygon\n n_ids = 3\n color = self.patch_color[i]\n else:\n poly_type = vtkPolyLine\n n_ids = 4\n color = self.mesh_color\n cells = vtkCellArray()\n\n # Create the polygons\n for j in range(mesh.triangles.shape[1]):\n poly = poly_type()\n poly.GetPointIds().SetNumberOfIds(n_ids) # make a tri\n for k in range(len(mesh.triangles[:, j])):\n poly.GetPointIds().SetId(k, mesh.triangles[k, j])\n if not draw_patch:\n poly.GetPointIds().SetId(3, mesh.triangles[0, j]) # Close the triangle\n cells.InsertNextCell(poly)\n\n poly_data = vtkPolyData()\n poly_data.SetPoints(points)\n if draw_patch:\n poly_data.SetPolys(cells)\n else:\n poly_data.SetLines(cells)\n\n mapper = vtkPolyDataMapper()\n mapper.SetInputData(poly_data)\n\n # Create an actor\n self.mesh_actors.append(vtkActor())\n self.mesh_actors[i].SetMapper(mapper)\n 
self.mesh_actors[i].GetProperty().SetColor(color)\n self.mesh_actors[i].GetProperty().SetOpacity(self.mesh_opacity)\n\n self.parent_window.ren.AddActor(self.mesh_actors[i])\n\n # Update marker position\n self.update_mesh(self.all_meshes)", "def create_grid(self):\n # Domain definition\n network = pp.FractureNetwork2d(self.frac_pts.T, self.frac_edges.T, domain=self.box)\n gb = network.mesh(self.mesh_args) \n pp.contact_conditions.set_projections(gb)\n\n self.gb = gb\n self.Nd = self.gb.dim_max()\n self._Nd = self.gb.dim_max()\n g2d = self.gb.grids_of_dimension(2)[0]\n self.min_face = np.copy(self.mesh_size) #np.min(g2d.face_areas)\n self.min_cell = np.min(g2d.cell_volumes)\n self.p, self.t = analysis.adjustmesh(g2d, self.tips, self.GAP)\n self.displacement = self.p*0\n self.fa_no = g2d.face_nodes.indices.reshape((2, g2d.num_faces), order='f').T \n return gb", "def createMesh(self, chem, coord_x_start, coord_y_start) :\r\n init_conc = .0\r\n self.compParDiff(chem)\r\n comp.Comp.createMeshHomo(self, 'SC', chem, init_conc, coord_x_start, coord_y_start)\r\n #self.meshes[0].setConc(1)\r", "def DisplayMesh():\r\n \r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, Vertices, Triangles = CreateMatrixVTK(VTKString)\r\n \r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111,projection = '3d')\r\n ax1.plot_trisurf(Vertices[:,0],Vertices[:,1],Vertices[:,2],triangles= Triangles[:,1:])\r\n ax1.set_zlabel('z')\r\n ax1.set_ylabel('y')\r\n ax1.set_xlabel('x')\r\n plt.show()", "def __init__(self, mesh, V):\n super().__init__(mesh, V)\n self.bcs = None", "def create_mesh(name):\n return bpy.data.meshes.new(name)", "def __init__(self, name='mesh', cmesh=None):\n Struct.__init__(self, name=name, nodal_bcs={}, io=None)\n if cmesh is not None:\n self.cmesh_tdim = [None] * 4\n self.cmesh = self.cmesh_tdim[cmesh.tdim] = cmesh\n self._collect_descs()\n self._coors = self.cmesh.coors\n self._set_shape_info()" ]
[ "0.6806747", "0.65791893", "0.6558242", "0.6491259", "0.62664014", "0.6237129", "0.6208497", "0.615927", "0.60939723", "0.60888344", "0.60705", "0.5994552", "0.59809655", "0.5952687", "0.5920004", "0.5856578", "0.58521825", "0.5839071", "0.57246846", "0.570731", "0.56939", "0.5678031", "0.566693", "0.5663317", "0.563366", "0.5619439", "0.5610231", "0.5578679", "0.55613184", "0.55584395" ]
0.66739905
1
Plot a curve of one or more classification metrics vs. epoch.
def plot_curve(epochs, hist, list_of_metrics):
    # list_of_metrics should be one of the names shown in:
    # https://www.tensorflow.org/tutorials/structured_data/imbalanced_data#define_the_model_and_metrics

    plt.figure()
    plt.xlabel("Epoch")
    plt.ylabel("Value")

    for m in list_of_metrics:
        x = hist[m]
        plt.plot(epochs[1:], x[1:], label=m)

    plt.legend()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_learning_curve(self):\n\n # Loop output classes\n for c in range(1,self.n_output_classes):\n # Get data\n x_values = np.array(self.n_class_samples_list[c])\n accuracy = np.array(self.accuracy_list[c])\n precision = np.array(self.precision_list[c])\n recall = np.array(self.recall_list[c])\n F1 = np.array(self.F1_list[c])\n\n # Make plot\n with sns.axes_style(\"ticks\"):\n fig,ax = plt.subplots()\n plt.plot([np.min(x_values),np.max(x_values)],[0.5,0.5],\n color='#777777',linestyle='--')\n plt.plot([np.min(x_values),np.max(x_values)],[0.66,0.66],\n color='#777777',linestyle=':')\n plt.plot([np.min(x_values),np.max(x_values)],[0.8,0.8],\n color='#777777',linestyle=':')\n plt.plot([np.min(x_values),np.max(x_values)],[0.9,0.9],\n color='#777777',linestyle=':')\n\n plt.plot( x_values, accuracy, color='#000000',\n linewidth=1, label='Accuracy' )\n plt.plot( x_values, precision, color='#0000aa',\n linewidth=1, label='Precision' )\n plt.plot( x_values, recall, color='#00aa00',\n linewidth=1, label='Recall' )\n plt.plot( x_values, F1, color='#aa0000',\n linewidth=2, label='F1' )\n\n plt.yticks( [0, 0.5, 0.66, 0.8, 0.9, 1.0],\n ['0','0.5','0.66','0.8','0.9','1.0'], ha='right' )\n plt.xlim(np.max(x_values)*-0.02,np.max(x_values)*1.02)\n plt.ylim(-0.02,1.02)\n plt.xlabel('Number of training samples')\n plt.ylabel('Performance')\n plt.title('Learning curve, class {}'.format(c))\n sns.despine(ax=ax, offset=0, trim=True)\n lgnd = plt.legend(loc=4, ncol=1, frameon=True, fontsize=9)\n lgnd.get_frame().set_facecolor('#ffffff')\n ax.spines['left'].set_bounds(0,1)\n ax.spines['bottom'].set_bounds(np.min(x_values),np.max(x_values))", "def plot_accuracy(self):\n plot_title, img_title = self.prep_titles(\"\")\n test_legend = ['training data', 'test data']\n\n # Data for plotting x- and y-axis\n x = np.arange(1, CFG.EPOCHS + 1)\n y = [self.tr_accuracy, self.test_accuracy]\n\n # prints x and y-axis values\n print(f'x: {x}')\n print(f'training: {self.tr_accuracy}')\n print(f'test: {self.test_accuracy}')\n\n plt.figure(figsize=(CFG.FIG_WIDTH, CFG.FIG_HEIGHT))\n\n # Create the lineplot\n for line in range(2):\n ax = sns.lineplot(x=x, y=y[line], color=CFG.COLOR_ACCURACY[line], label=test_legend[line])\n\n if CFG.ANNOTATE:\n ax.set(xlabel='Epochs',\n ylabel='Accuracy (%)',\n title=plot_title,\n xlim=(1, CFG.EPOCHS + 2),\n ylim=(0, 119))\n\n for line in range(2):\n for e in range(0, CFG.EPOCHS):\n if y[line][e] > CFG.ANNOTATE_LEVEL:\n value = \"{:.2f}\".format(y[line][e])\n label = \"epoch \" + str(e + 1) + \"\\n\" + value + \"%\"\n plt.annotate(label,\n xy=(x[e], y[line][e]),\n alpha=1,\n size=9,\n rotation=45,\n textcoords='offset pixels', xytext=(0, 7),\n ha='left', va='bottom')\n else:\n ax.set(xlabel='Epochs',\n ylabel='Accuracy (%)',\n title=plot_title,\n xlim=(1, CFG.EPOCHS),\n ylim=(0, 102))\n\n ax.legend(loc='best')\n\n self.save_plot(img_title)\n plt.show()", "def plot(self, ylog=False, category=\"Accuracy\", figsize=(12, 5)):\n if self.CV == False: # no Cross Validation set case\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)\n plt.suptitle(\"Training Curve for \" + self.loss, fontsize=12)\n ax[0].plot(range(1, len(self.trainError) + 1), self.trainError, 'g-', label='Training Error')\n ax[0].set_xlabel('Iteration')\n ax[0].set_ylabel(\"Error\")\n if ylog == True:\n ax[0].set_yscale('log')\n ax[0].legend()\n ax[0].grid('on')\n\n if category == \"Accuracy\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), self.trainAcc, 'r-', label='Training Accuracy')\n ax[1].set_ylabel(\"Accuracy\")\n 
elif category == \"Error Rate\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), 1 - np.array(self.trainAcc), 'r-', label='Training Error Rate')\n ax[1].set_ylabel(\"Error Rate\")\n # ax[1].set_ylim((0, 1))\n ax[1].set_xlabel('Iteration')\n ax[1].legend(loc='best')\n ax[1].grid('on')\n plt.show()\n if self.CV == True: # has Cross Validation set case\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)\n plt.suptitle(\"Training Curve for \" + self.loss, fontsize=12)\n ax[0].plot(range(1, len(self.trainError) + 1), self.trainError, 'g-', label='Training Error')\n ax[0].plot(range(1, len(self.cvError) + 1), self.cvError, 'r-', label='CV Error')\n ax[0].set_xlabel('Iteration')\n ax[0].set_ylabel(\"Error\")\n if ylog == True:\n ax[0].set_yscale('log')\n ax[0].legend()\n ax[0].grid('on')\n\n if category == \"Accuracy\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), self.trainAcc, 'g-', label='Training Accuracy')\n ax[1].plot(range(1, len(self.cvAcc) + 1), self.cvAcc, 'r-', label='CV Accuracy')\n ax[1].set_ylabel(\"Accuracy\")\n elif category == \"Error Rate\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), 1 - np.array(self.trainAcc), 'g-', label='Training Error Rate')\n ax[1].plot(range(1, len(self.cvAcc) + 1), 1 - np.array(self.cvAcc), 'r-', label='CV Error Rate')\n ax[1].set_ylabel(\"Error Rate\")\n # ax[1].set_ylim((0, 1))\n ax[1].set_xlabel('Iteration')\n ax[1].legend(loc='best')\n ax[1].grid('on')\n plt.show()\n\n return fig, ax", "def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc):\n \n green = '#72C29B'\n orange = '#FFA577'\n \n with plt.xkcd():\n # plot model loss\n fig, ax1 = plt.subplots()\n ax1.plot(range(1, len(train_loss) + 1), train_loss, green, linewidth=5,\n label='training')\n ax1.plot(range(1, len(train_val_loss) + 1), train_val_loss, orange,\n linewidth=5, label='validation')\n ax1.set_xlabel('# epoch')\n ax1.set_ylabel('loss')\n ax1.tick_params('y')\n ax1.legend(loc='upper right', shadow=False)\n # plot model accuracy\n fig, ax2 = plt.subplots()\n ax2.plot(range(1, len(train_acc) + 1), train_acc, green, linewidth=5,\n label='training')\n ax2.plot(range(1, len(train_val_acc) + 1), train_val_acc, orange,\n linewidth=5, label='validation')\n ax2.set_xlabel('# epoch')\n ax2.set_ylabel('accuracy')\n ax2.tick_params('y')\n ax2.legend(loc='lower right', shadow=False)\n plt.show()", "def plot_curve(epochs, hist, list_of_metrics):\n # list_of_metrics should be one of the names shown in:\n # https://www.tensorflow.org/tutorials/structured_data/imbalanced_data#define_the_model_and_metrics\n\n plt.figure()\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Value\")\n\n for m in list_of_metrics:\n x = hist[m]\n plt.plot(epochs[1:], x[1:], label=m)\n\n plt.legend()\n plt.show()", "def plot_observations():\n plt.plot(history.history['loss'], label='training_loss')\n plt.plot(history.history['val_loss'], label='val_loss ')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.show()\n\n plt.plot(history.history['acc'], label='accuracy')\n plt.plot(history.history['val_acc'], label='val_accuracy')\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.legend(loc='lower right')\n plt.show()\n\n test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n print(\"Test Accuracy:\", test_acc)", "def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc, save_figure_path):\n\n green = '#72C29B'\n orange = '#FFA577'\n\n with plt.xkcd():\n fig, (ax1, ax2) = plt.subplots(2, figsize=(10, 8))\n ax1.plot(range(1, len(train_loss) + 1), train_loss, 
green, linewidth=5,\n label='training')\n ax1.plot(range(1, len(train_val_loss) + 1), train_val_loss, orange,\n linewidth=5, label='validation')\n ax1.set_xlabel('# epoch')\n ax1.set_ylabel('loss')\n ax1.tick_params('y')\n ax1.legend(loc='upper right', shadow=False)\n ax1.set_title('Model loss through #epochs', fontweight='bold')\n\n ax2.plot(range(1, len(train_acc) + 1), train_acc, green, linewidth=5,\n label='training')\n ax2.plot(range(1, len(train_val_acc) + 1), train_val_acc, orange,\n linewidth=5, label='validation')\n ax2.set_xlabel('# epoch')\n ax2.set_ylabel('accuracy')\n ax2.tick_params('y')\n ax2.legend(loc='lower right', shadow=False)\n ax2.set_title('Model accuracy through #epochs', fontweight='bold')\n\n plt.tight_layout()\n plt.show()\n fig.savefig(save_figure_path)\n plt.close(fig)", "def cross_validation_visualization_accuracy(epochs, accs, save=False, filename=\"cross_validation_acc\"):\n plt.plot(epochs, accs, marker=\".\", color='r', label='accuracy')\n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n if (save):\n plt.savefig(filename)", "def train_nn(train_nn_results, label, title, yaxis):\n plt.figure(figsize=(12,5))\n for i in range(len(label)):\n plt.plot(train_nn_results[i], label=label[i], alpha=0.75)\n plt.title(title)\n plt.xlabel('epoch')\n plt.ylabel(yaxis)\n plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')\n plt.tight_layout()\n plt.show()", "def plot(training_losses, validation_losses, epochs, directory_name):\n plt.figure(figsize=(20, 10))\n\n x = np.linspace(1, epochs, epochs)\n training_losses = np.array(training_losses)\n validation_losses = np.array(validation_losses)\n\n plt.title(\"Learning curve over Epochs\")\n\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Average Loss\")\n\n plt.plot(x, training_losses, color='purple', marker=\".\", label='Training loss')\n plt.plot(x, validation_losses, color='orange', marker=\".\", label='Validation loss')\n plt.legend()\n plt.savefig('./' + directory_name + '/Learning_curves-' + str(epochs) + '.png')\n pass", "def plot_data(x, y, epochs):\n\n fig = plt.figure()\n ax = fig.gca()\n\n ax.set_ylim(0, int(np.max(y)+0.5))\n ax.set_xlim(0, np.max(x))\n ax.yaxis.grid(True)\n ax.grid(which='minor', axis='x', alpha=0.2)\n ax.grid(which='major', axis='x', alpha=0.5)\n major_ticks = np.arange(0, np.max(x), 88)\n minor_ticks = np.arange(0, np.max(x), 16)\n ax.set_xticks(major_ticks)\n ax.set_xticks(minor_ticks, minor=True)\n\n fig.canvas.draw()\n labels = [\"{:2d}\".format(int(int(item.get_text())/88)) for item in ax.get_xticklabels()]\n ax.set_xticklabels(labels)\n\n plt.title(\"Model Loss over {} Epochs\".format(epochs))\n plt.scatter(x, y, s=50, alpha=0.5, label='cross_entropy')\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.legend(loc='upper right')\n plt.show()", "def plot_acc(acc_v, acc_t, save_plots_path):\n\n plt.figure()\n plt.plot(acc_v, label='Validation acc')\n plt.plot(acc_t, label='Training acc')\n plt.legend()\n title = 'Accuracy per epoch'\n plt.title(title)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Accuracy\")\n plt.savefig(save_plots_path + \"swag_accuracy_plot.png\")", "def train_visualization(output_path): \n log_path = output_path + 'output.log'\n Train_Cost, Valid_Cost, Test_Cost, Train_Acc, Valid_Acc, Test_Acc = log_reader(log_path)\n n_epoch = len(Train_Cost)\n\n x1 = range(n_epoch)\n x2 = range(n_epoch)\n y1 = Train_Cost\n y2 = Valid_Cost\n y3 = Test_Cost\n y4 = Train_Acc\n y5 = Valid_Acc\n y6 = Test_Acc\n plt.subplot(2, 1, 
1)\n plt.plot(x1, y1, label=\"Train_Cost\", linewidth=2)\n plt.plot(x1, y2, label=\"Valid_Cost\", linewidth=2)\n plt.plot(x1, y3, label=\"Test_Cost\", linewidth=2)\n\n plt.title('binary cross entropy vs. epoches')\n plt.ylabel('binary cross entropy')\n plt.legend(loc='best')\n plt.subplot(2, 1, 2)\n plt.plot(x2, y4, label=\"Train_Acc\", linewidth=2)\n plt.plot(x2, y5, label=\"Valid_Acc\", linewidth=2)\n plt.plot(x2, y6, label=\"Test_Acc\", linewidth=2)\n plt.xlabel('Accuracy@20 vs. epoches')\n plt.ylabel('Accuracy@20')\n plt.legend(loc='best')\n plt.savefig(output_path + 'loss_fig.png')\n # plt.show()", "def plot_training_curve(path):\n import matplotlib.pyplot as plt\n train_err = np.loadtxt(\"{}_train_err.csv\".format(path))\n val_err = np.loadtxt(\"{}_val_err.csv\".format(path))\n train_loss = np.loadtxt(\"{}_train_loss.csv\".format(path))\n val_loss = np.loadtxt(\"{}_val_loss.csv\".format(path))\n plt.title(\"Train vs Validation Error\")\n n = len(train_err) # number of epochs\n plt.plot(range(1,n+1), train_err, label=\"Train\")\n plt.plot(range(1,n+1), val_err, label=\"Validation\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Error\")\n plt.legend(loc='best')\n plt.show()\n plt.title(\"Train vs Validation Loss\")\n plt.plot(range(1,n+1), train_loss, label=\"Train\")\n plt.plot(range(1,n+1), val_loss, label=\"Validation\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.legend(loc='best')\n plt.show()", "def plot_train_results(metrics2record, loss_metric,\n train_metrics, test_metrics):\n pyplot.figure(figsize=(10, 5))\n min_, max_ = np.min(loss_metric), np.max(loss_metric)\n lg, = pyplot.plot(loss_metric)\n pyplot.yticks(min_ + np.arange(5) * (max_ - min_))\n # if learning_rate is not None:\n # lg, = pyplot.plot(learning_rate)\n pyplot.title('Loss')\n pyplot.xlabel('Epoch')\n pyplot.yscale('log')\n pyplot.show()\n\n for prm in basic_metrics:\n if prm in metrics2record:\n leg = []\n met_idx = metrics2record.index(prm)\n pyplot.figure(figsize=(10, 5))\n lg, = pyplot.plot(train_metrics[:, met_idx], label=('train'))\n leg.append(lg)\n lg, = pyplot.plot(test_metrics[:, met_idx], label=('test'))\n leg.append(lg)\n\n pyplot.legend(handles=leg)\n pyplot.title(prm)\n pyplot.xlabel('Epoch')\n pyplot.show()\n\n has_prf = any([(prm in PRF_metrics) for prm in metrics2record])\n if has_prf:\n pyplot.figure(figsize=(10, 5))\n leg = []\n for prm in PRF_metrics:\n if prm in metrics2record:\n met_idx = metrics2record.index(prm)\n lg, = pyplot.plot(train_metrics[:, met_idx],\n label=(prm + ':train'))\n leg.append(lg)\n\n for prm in PRF_metrics:\n if prm in metrics2record:\n met_idx = metrics2record.index(prm)\n lg, = pyplot.plot(test_metrics[:, met_idx],\n label=(prm + ':test'))\n leg.append(lg)\n\n pyplot.legend(handles=leg)\n pyplot.title('Precision / Recall')\n pyplot.xlabel('Epoch')\n pyplot.show()", "def plot_on_ax(ax, trn_ls, val_ls, ylabel=\"Accuracy\"):\n ax.plot(trn_ls, 'o-', label='Training')\n ax.plot(val_ls, 'x-', label='Validation')\n ax.set_xlabel('Epochs')\n ax.set_ylabel(ylabel)\n ax.legend()", "def plot_loss_curves(results):\n loss = results[\"train_loss\"]\n test_loss = results[\"test_loss\"]\n\n accuracy = results[\"train_acc\"]\n test_accuracy = results[\"test_acc\"]\n\n epochs = range(len(results[\"train_loss\"]))\n\n plt.figure(figsize=(15, 7))\n\n # Plot loss\n plt.subplot(1, 2, 1)\n plt.plot(epochs, loss, label=\"train_loss\")\n plt.plot(epochs, test_loss, label=\"test_loss\")\n plt.title(\"Loss\")\n plt.xlabel(\"Epochs\")\n plt.legend()\n\n # Plot accuracy\n plt.subplot(1, 2, 
2)\n plt.plot(epochs, accuracy, label=\"train_accuracy\")\n plt.plot(epochs, test_accuracy, label=\"test_accuracy\")\n plt.title(\"Accuracy\")\n plt.xlabel(\"Epochs\")\n plt.legend()", "def plot_training_history(history, metric):\n \n val_metric = 'val_'+metric\n acc = history.history[metric]\n val_acc = history.history[val_metric]\n \n loss = history.history['loss']\n val_loss = history.history['val_loss']\n \n epochs_range = history.epoch\n \n plt.figure(figsize=(8, 8))\n plt.subplot(2, 1, 1)\n plt.plot(epochs_range, acc, label='Training Acc.')\n plt.plot(epochs_range, val_acc, label='Validation Acc.')\n plt.legend(loc='best',)\n plt.title('Training and Validation Accuracy')\n \n plt.subplot(2, 1, 2)\n plt.plot(epochs_range, loss, label='Training Loss')\n plt.plot(epochs_range, val_loss, label='Validation Loss')\n plt.legend(loc='best')\n plt.title('Training and Validation Loss')\n plt.show()", "def create(self, train: List[float], validation: List[float]) -> None:\n self.ax.plot(train)\n self.ax.plot(validation)\n self.ax.set_xlabel('epochs')\n if self.loss:\n self.ax.set_ylabel('loss')\n else:\n self.ax.set_ylabel('accuracy')\n self.ax.legend(['train', 'validation'])", "def plot_curve(self):\n x1 = np.arange(self.init_epoch, self.params.num_epoch+1, dtype=np.int).tolist()\n x2 = np.linspace(self.init_epoch, self.epoch,\n num=(self.epoch-self.init_epoch)//self.params.val_every+1, dtype=np.int64)\n plt.plot(x1, self.train_loss, label='train_loss')\n plt.plot(x2, self.val_loss, label='val_loss')\n plt.legend(loc='best')\n plt.title('Train/Val loss')\n plt.grid()\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.show()", "def plot_eval_3(trained_model, X_val, y_val, image_name):\n # FOR EACH CLASS\n # val_pred = trained_model.predict_proba(X_val, num_iteration=iteration)\n \n iterations = trained_model.booster_.current_iteration()\n# results = np.zeros((2, iterations))\n results = np.zeros((iterations,))\n for pos in range(iterations):\n \n # Calculate the current iteration (from 1 to iterations)\n iteration = pos + 1\n \n # Predict validation set for the current iteration\n# start_time = timeit.default_timer()\n val_pred = trained_model.predict(X_val, num_iteration=iteration)\n# end_time = timeit.default_timer()\n# time = end_time - start_time\n# speed = int(X_val.shape[0] / time)\n \n # Number of hits\n val_ok = (val_pred == y_val)\n \n # Percentage of hits\n val_acc = val_ok.sum() / val_ok.size\n \n # Actualize data for plotting results\n# results[0][pos] = time\n# results[1][pos] = val_acc\n results[pos] = val_acc\n \n # Generate accuracy plot\n plt.figure()\n# plt.plot(results[0], results[1], 'b')\n plt.plot(results, 'b')\n plt.title('Validation accuracy')\n plt.xlabel('iterations')\n plt.ylabel('accuracy')\n plt.legend()\n \n # Save validation plot\n plot_file = os.path.join(OUTPUT_DIR, \"{}_val_accuracy\".format(image_name))\n plt.savefig(plot_file + \".svg\", bbox_inches='tight', format='svg')", "def main():\n args = parse_args()\n\n with open(args.train_details_json, mode='r', encoding='utf-8') as json_f:\n results_dict = json.load(json_f)[-1]\n\n losses_plot = plt.figure()\n plt.plot(range(1, len(results_dict['train_loss']) + 1),\n results_dict['train_loss'])\n plt.plot(range(1, len(results_dict['val_loss']) + 1),\n results_dict['val_loss'])\n plt.plot(range(1, len(results_dict['test_loss']) + 1),\n results_dict['test_loss'])\n plt.legend(['train', 'val', 'test'])\n plt.title(f'loss vs epoch for {args.model} model on {args.dataset} dataset')\n plt.xlabel('epoch')\n 
plt.ylabel('loss')\n plt.grid(True)\n losses_plot.set_size_inches((8, 8))\n losses_plot.savefig(\n os.path.join(FIGURES_DIR,\n f'{args.dataset}_{args.model}_losses_plot.png'))\n\n accuracies_plot = plt.figure()\n plt.plot(range(1, len(results_dict['train_acc']) + 1),\n results_dict['train_acc'])\n plt.plot(range(1, len(results_dict['val_acc']) + 1),\n results_dict['val_acc'])\n plt.plot(range(1, len(results_dict['test_acc']) + 1),\n results_dict['test_acc'])\n plt.legend(['train', 'val', 'test'])\n plt.title(f'accuracy vs epoch for {args.model} '\n f'model on {args.dataset} dataset')\n plt.xlabel('epoch')\n plt.ylabel('accuracy')\n plt.grid(True)\n accuracies_plot.set_size_inches((8, 8))\n accuracies_plot.savefig(\n os.path.join(FIGURES_DIR,\n f'{args.dataset}_{args.model}_accuracies_plot.png'))", "def plot(self, epochs, title=\"Learning Rate Schedule\"):\n lrs = [self(i) for i in epochs]\n\n # plot the learning rate schedule\n plt.style.use(\"ggplot\")\n plt.figure()\n plt.plot(epochs, lrs)\n plt.title(title)\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Learning Rate\")\n plt.close()", "def plot_training_history(history):\n fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(15, 5))\n ax_loss.plot(history.epoch, history.history[\"loss\"], label=\"Train loss\")\n ax_loss.plot(history.epoch, history.history[\"val_loss\"], label=\"Validation loss\")\n ax_loss.legend()\n ax_acc.plot(history.epoch, history.history[\"iou_score\"], label=\"Train iou\")\n ax_acc.plot(history.epoch, history.history[\"val_iou_score\"], label=\"Validation iou\")\n ax_acc.legend()", "def visualize_train_history(history):\n cat_acc = history.history['categorical_accuracy']\n val_cat_acc = history.history['val_categorical_accuracy']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n epochs = range(1, len(cat_acc) + 1)\n\n plt.plot(epochs, cat_acc, 'bo', label='Training cat_acc')\n plt.plot(epochs, val_cat_acc, 'b', label='Validation cat_acc')\n plt.title('Training and validation accuracy')\n plt.legend()\n\n plt.figure()\n\n plt.plot(epochs, loss, 'bo', label='Training loss')\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend()\n\n plt.show()", "def plot_accuracy(model_fit, save_folder): \n train_acc = model_fit.history['binary_accuracy']\n val_acc = model_fit.history['val_binary_accuracy']\n epoch_axis = np.arange(1, len(train_acc) + 1)\n plt.title('Train vs Validation Accuracy')\n plt.plot(epoch_axis, train_acc, 'b', label='Train Acc')\n plt.plot(epoch_axis, val_acc,'r', label='Val Acc')\n plt.xlim([1, len(train_acc)])\n plt.xticks(np.arange(min(epoch_axis), max(epoch_axis) + 1, round((len(train_acc) / 10) + 0.5)))\n plt.legend(loc='lower right')\n plt.ylabel('Accuracy')\n plt.xlabel('Epochs')\n plt.savefig(save_folder + '/accuracy.png')\n plt.show()\n plt.close()", "def cross_validation_visualization_accuracy_multiple(epochs, accs, save=False, filename=\"cross_validation_acc_multiple\"):\n \n for i in range(accs.shape[0]):\n plt.plot(epochs, accs[i], marker=\".\", color='r', label=str(i+1)+'th accuracy')\n \n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n if (save):\n plt.savefig(filename)", "def plot_results(self):\n\n\n f1, ax1 = plt.subplots()\n h1, = ax1.plot(self.history[\"step\"], self.history[\"trainLoss\"],\\\n \"b-\", label=\"Loss - Train\")\n h2, = ax1.plot(self.history[\"step\"], self.history[\"validLoss\"],\\\n \"b.\", label=\"Loss - 
Validation\")\n\n ax1.set_ylabel(\"Loss\", color = \"blue\")\n ax1.tick_params(\"y\", color = \"blue\")\n ax1.yaxis.label.set_color(\"blue\")\n ax1.set_xlabel(\"Training Steps [{}]\".format(self.FLAGS.eval_every))\n\n ax2 = ax1.twinx()\n h3, = ax2.plot(self.history[\"step\"], self.history[\"trainAccr\"], \"r-\",\\\n label = \"Accuracy - Train\")\n h4, = ax2.plot(self.history[\"step\"], self.history[\"validAccr\"], \"r.\",\\\n label = \"Accuracy - Validation\")\n\n ax2.set_ylabel(\"Accuracy\", color = \"red\")\n ax2.tick_params(\"y\", color = \"red\")\n ax2.yaxis.label.set_color(\"red\")\n\n hds = [h1,h2,h3,h4]\n lbs = [l.get_label() for l in hds]\n ax1.legend(hds, lbs)\n f1.tight_layout()\n plt.savefig(\"trainingHistory.png\")\n\n plt.close(f1)\n #plt.show()", "def plot_training_info(case, metrics, save, history):\n val = False\n if 'val_accuracy' in history and 'val_loss' in history:\n val = True\n plt.ioff()\n if 'accuracy' in metrics:\n fig = plt.figure()\n plt.plot(history['accuracy'])\n if val:\n plt.plot(history['val_accuracy'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n if val:\n plt.legend(['train', 'val'], loc='upper left')\n else:\n plt.legend(['train'], loc='upper left')\n if save:\n plt.savefig(case + 'accuracy.png')\n plt.gcf().clear()\n else:\n plt.show()\n plt.close(fig)\n\n # summarize history for loss\n if 'loss' in metrics:\n fig = plt.figure()\n plt.plot(history['loss'])\n if val:\n plt.plot(history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n # plt.ylim(1e-3, 1e-2)\n plt.yscale(\"log\")\n if val:\n plt.legend(['train', 'val'], loc='upper left')\n else:\n plt.legend(['train'], loc='upper left')\n if save:\n plt.savefig(case + 'loss.png')\n plt.gcf().clear()\n else:\n plt.show()\n plt.close(fig)", "def plot_mean_roc_curve_of_classifiers(classifier_roc_list, data_set_description):\n if const.RECORD_RESULTS is True:\n fig = plt.figure(figsize=(8, 6.66))\n monochrome = (cycler(\"color\", [\"k\"]) * cycler(\"marker\", [\"\"]) *\n cycler(\"linestyle\", [\"-\", \"--\", \"-.\"]))\n color_arr = [\"#64B3DE\", \"#1f78b4\", \"#6ABF20\", \"#FBAC44\", \"#bc1659\", \"#B9B914\", \"#33a02c\", \"#ff7f00\", \"#6a3d9a\", \"black\", \"#b15928\", \"#e31a1c\"]\n plt.rc(\"axes\", prop_cycle=monochrome)\n line_style_index = 0\n color_index = 0\n\n for (test_run_roc_list, classifier_description) in classifier_roc_list:\n if not (None, None) in test_run_roc_list[0]:\n mean_tpr = 0.0\n mean_fpr = np.linspace(0, 1, 100)\n count = 0\n for roc_list in test_run_roc_list:\n for (tpr, fpr) in roc_list:\n mean_tpr += interp(mean_fpr, fpr, tpr)\n mean_tpr[0] = 0.0\n count += 1\n\n mean_tpr /= float(count)\n mean_tpr[-1] = 1.0\n mean_auc = auc(mean_fpr, mean_tpr)\n line_width = 0.5\n if line_style_index == 1:\n line_width = 0.8\n elif line_style_index == 2:\n line_width = 1.5\n\n plt.plot(mean_fpr, mean_tpr, c=color_arr[color_index], lw=line_width, alpha=1, label=\"{0} ({1:.3f})\".format(classifier_description, mean_auc))\n line_style_index = (line_style_index + 1) % 3\n color_index += 1\n\n plt.locator_params(axis='x', nbins=10)\n plt.locator_params(axis='y', nbins=10)\n plt.plot([0, 1], [0, 1], \"k--\", label=\"Random classification\", lw=0.8)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n plt.title(\"ROC curve for each classifier\")\n plt.legend(loc=\"lower right\", fancybox=True, frameon=True)\n current_time = 
datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/{0}_roc_classifier_plot_{1}.png\".format(data_set_description, current_time), bbox_inches=\"tight\")\n plt.close(fig)" ]
[ "0.74008095", "0.7243723", "0.7237462", "0.7235167", "0.7196403", "0.7191028", "0.7170476", "0.7084031", "0.70716345", "0.70501333", "0.7032067", "0.70216435", "0.7007549", "0.69880277", "0.6926381", "0.6898733", "0.68972385", "0.6889812", "0.686029", "0.6845836", "0.68317056", "0.68301404", "0.6829821", "0.68044955", "0.68033344", "0.6792626", "0.67918986", "0.67843914", "0.6777095", "0.67500335" ]
0.73830307
1
if user enters an amount that is not enough, they should not be given the item and their money should be returned
def test_amount_not_enough(self):
    item, change, _ = give_item_and_change('coke', .50)
    self.assertIsNone(item)
    self.assertEqual(change, 0.5)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def amount_entered():\n while True: #Run until a suitable input is passed.\n try:\n amt = int(input(\"Enter value you wish to trade >>> \"))\n if amt <= 0:\n raise Exception\n return amt\n except ValueError: #if a string is entered\n print(\"Please enter an integer\")\n except Exception: #if a negative digit is entered\n print(\"Value cannot be less than or equal to 0\")", "def pay_for_item(self, item):\n while self.amount < item.price:\n paid_amount = float(input(f\"Pay €{round((item.price - self.amount), 2)} : \"))\n if paid_amount <= 0:\n custom_log(\"Invalid amount entered.\", MSG_ERROR)\n continue\n self.amount = self.amount + paid_amount", "def check_money(drink, amount):\n if (drink == \"espresso\" and amount < MENU[drink][\"cost\"]) or (drink == \"latte\" and amount < MENU[drink][\"cost\"])\\\n or (drink == \"cappuccino\" and amount < MENU[drink][\"cost\"]):\n # if not enough money, start over\n print(f\"Sorry that's not enough money. Drink is ${MENU[drink]['cost']}. You gave ${amount}. Money refunded.\")\n return False\n else:\n return True", "def validate_bet(buy_type, cash_in):\n while cash_in < 0:\n print(\"Invalid\", buy_type)\n cash_in = round(float(input(\"Enter \" + buy_type + \": $\")), 2)\n\n return cash_in", "def get_amount():\n while True:\n try:\n amount = input(\"How much did they donate: \")\n if str(amount).lower() == 'exit':\n return amount\n else:\n return float(amount)\n except ValueError:\n print(\"you have made an invalid choice, try again.\")", "def prompt_user_money_to_withdrawl():\n print('What amount of money do you want to withdrawl?:')\n return input()", "def check_required_change(drink, amount):\n if (drink == \"espresso\" and amount > MENU[drink][\"cost\"]) or (drink == \"latte\" and amount > MENU[drink][\"cost\"])\\\n or (drink == \"cappuccino\" and amount > MENU[drink][\"cost\"]):\n return amount - MENU[drink][\"cost\"]\n else:\n return 0.00", "def get_user_input():\n return float(input('Your transaction amount please: '))", "def get_price():\n\n while (True):\n price = input(\"Enter the purchase price (xx.xx) or 'q' to quit: \")\n if(price.capitalize() == 'Q'):\n return -1\n elif price.replace('.', '').isdigit() and not is_valid(price):\n print(\"Illegal price: Must be a non-negative multiple of 5 cents.\\n\")\n elif not price.replace('.', '').isdigit():\n print(\"Illegal entry: Must be a price like (1.75) or 'q' for quit.\\n\")\n else:\n return float(price)", "def prompt_user_money_to_deposit():\n print('What amount of money do you want to deposit?:')\n return input()", "def test_unavailable_item(self):\n item, change, _ = give_item_and_change('crisps', '1.00 .50')\n self.assertIsNone(item)\n self.assertEqual(change, 1.35)", "def get_bill_amt():\n\n return float(input(\"How much was your total bill: \"))", "def check_price(self):\n if self.price < 0:\n self.raise_user_error(\"negative_amount\")", "def test_has_enough_money_handles_insufficient_funds(self):\n # Params\n f_money_collected = 2.00\n f_chocolate_price = 2.25\n\n # Returns\n return_1 = 'Insufficient funds... 
Dispensing coins inserted.\\n'\n\n # Calls\n string_1 = has_enough_money(f_money_collected, f_chocolate_price)\n\n # Asserts\n self.assertEqual(string_1, return_1)", "def take(self, desired_amount):\n if self.amount >= desired_amount:\n grab = desired_amount\n else:\n grab = min(desired_amount, self.amount)\n self.amount -= grab\n print(f\"{self} {self.amount} of supplies left\")\n return grab", "def withdraw(self, amount):\n if amount < 0:\n return \"Amount must be >= 0\"\n elif self._balance < amount:\n return \"Insufficient funds\"\n else:\n self._balance -= amount\n return None", "def valid_input():\n valid = False\n while not valid:\n principal = float(input(\"Please enter principal amount: $\"))\n if principal < 0 or principal > 1000000:\n print(\"Invalid amount. \", end=\"\")\n print(\"Principal must be between $0 and $1,000,000.00\")\n else:\n valid = True\n valid = False\n while not valid:\n interest = float(input(\"Please enter interest rate: %\"))\n if interest < 0 or interest > 100:\n print(\"Invalid rate. Interest rate must be between 0 and 100\")\n else:\n valid = True\n return principal, interest", "def test_collect_money_handles_value_error(self):\n # Params\n f_max_value = 100.00\n f_quarters = 'k'\n f_dimes = 1\n f_nickels = 5\n\n # Returns\n return_1 = 'Please enter valid currency.\\n'\n\n # Calls\n string_1 = collect_money(f_max_value, f_quarters, f_dimes, f_nickels)\n\n # Asserts\n self.assertEqual(string_1, return_1)", "def transaction_successful(drink_type):\r\n total = 0\r\n cost = MENU[drink_type][\"cost\"]\r\n print(f\" A {drink_type} costs ${MENU[drink_type]['cost']}\")\r\n total += float(input(\" How many quarters? \")) * 0.25\r\n total += float(input(\" How many dimes? \")) * 0.10\r\n total += float(input(\" How many nickels? \")) * 0.05\r\n total += float(input(\" How many pennies? \")) * 0.01\r\n\r\n if total >= cost:\r\n print(f\"Here is ${total - cost} in change.\")\r\n return True\r\n else:\r\n print(\"Sorry that's not enough money. Money refunded.\")\r\n return False", "def check_risk(self, action, amount=None):\n if amount is None:\n # amount not specified, so determines max amount to trade\n if action == 'buy':\n amount = int((self.upper_bound-self.owned_value)/self.price) # A unit is 1 dollar here? TODO:\n elif action == 'sell':\n amount = int((self.lower_bound-self.owned_value)/self.price)\n else:\n raise ValueError(f\"action should be buy or sell, got {action}\")\n if action == 'buy':\n if self.owned_value + amount <= self.upper_bound:\n # Allowed to buy up to upper bound\n return True, amount\n else:\n # Trying to buy too much\n print(\"Trade not allowed, attempting to increase total amount to more than upper bound.\")\n return False, amount\n elif action == 'sell':\n if self.owned_value + amount >= self.lower_bound:\n # Allowed to buy down to lower_bound\n return True, amount\n else:\n print(\"Trade not allowed, attempting to increase debt to more than lower bound.\")\n return False, amount", "def check_user_has_enough_money(session, user_id, amount):\n user_funds = get_user_balance(session, user_id)\n if user_funds + amount < 0:\n raise NotEnoughMoneyException(\"Not enough money in your wallet!\")", "def test_unavailable_item(self):\n item, change, _ = give_item_and_change('crisps', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)", "def price_food():\r\n try:\r\n price = input(\"What is your budget? 
Enter a dollar amount(5.50) >\")\r\n price = price.replace('$', '')\r\n price = float(price)\r\n print('\\n')\r\n if price < 6:\r\n if price >= 5:\r\n print(\"Canteen 1 and Quad Cafe have food under 5 dollars\\n\\n\")\r\n\r\n if price >= 4:\r\n print(\"Canteen 2, Canteen 11, Canteen 16 and North Spine Food Court have food under 4 dollars\\n\\n\")\r\n\r\n if price >= 3:\r\n print(\"Canteen 9, Canteen 13, Canteen 14 and South Spine Food Court have food under 3 dollars\\n\\n\")\r\n\r\n else:\r\n print(\"Price is too low, please try another value.\\n\\n\")\r\n price_food()\r\n\r\n else:\r\n print(\"Any canteen's available for you!\\n\\n\")\r\n print('\\n')\r\n\r\n except ValueError:\r\n print(\"Please enter a dollar value\\n\\n\")\r\n price_food()", "def withdraws(account):\r\n limit = 500\r\n print(\"Your account balance is $\", format(account, \"0.2f\"), sep='')\r\n print(\"Your withdraw limit is $\", format(limit, \"0.2f\"), sep='')\r\n while True:\r\n try:\r\n withdraw_amount = int(input(\"Enter withdraw amount. $\"))\r\n break\r\n except ValueError:\r\n print(\"Error. Must be a whole number.\")\r\n # Checking if the customer has sufficient funds/over daily limit\r\n while withdraw_amount > account or withdraw_amount > limit:\r\n print(\"Insufficient funds or daily limit exceeded.\")\r\n while True:\r\n try:\r\n withdraw_amount = int(\r\n input(\"Enter withdraw amount. $\"))\r\n break\r\n except ValueError:\r\n print(\"Error. Must be a whole number.\")\r\n account -= withdraw_amount\r\n limit -= withdraw_amount\r\n print(\"Your new balance is $\", format(account, \"0.2f\"), sep='')\r\n print(\"Your new limit is $\", format(limit, \"0.2f\"), sep='')", "def buy_item(self, item):\n if self.amount < item.price:\n custom_log(\"Insufficient amount. Insert more coins.\", MSG_ERROR)\n else:\n self.amount = round((self.amount - item.price), 2)\n item._buy()\n custom_log(f\"You bought - {item.name}, remaining cash - €{self.amount}\", MSG_DEBUG)", "def get_tx_amount():\n return float(input(\"Enter Transaction Amount: \"))", "def check_amount_validity(self, amount):\r\n\r\n alert = \"Not a valid amount. 
Please try again!\"\r\n\r\n if type(amount) == int or type(amount) == float:\r\n return amount\r\n else:\r\n return alert", "def clean_amount(self):\n if self.payer_channel == 2: # ignore balance check if not using wallet\n return self.cleaned_data['amount']\n else:\n pay_amount = self.cleaned_data.get('amount')*100\n payer_wallet = Wallet.objects.filter(wallet_id=self.cleaned_data.get('payer_method')).first()\n if payer_wallet is None:\n raise forms.ValidationError(\n self.error_messages['payer wallet unavailable'],\n code='payer wallet unavailable'\n )\n else:\n payer_balance = payer_wallet.balance\n if pay_amount > payer_balance:\n raise forms.ValidationError(\n self.error_messages['no_enough_balance'],\n code='no_enough_balance'\n )\n else:\n return self.cleaned_data['amount']", "def is_sufficient(money_received, price):\n if price <= money_received:\n change = round(money_received - price, 2)\n print(f\"Here is your {option}.Enjoy!\\nHere us £{change} in change\")\n global profit\n profit += price\n return True\n else:\n print(f\"Sorry not enough money\")\n return False", "def options_to_withdraw(self, amount):\n counter = PaperMoneyCounter() # aux class\n options = [] # options to withdraw\n remaining_cash = 0 # aux var\n\n if (amount % 20 == 0 or amount % 50 == 0) and (amount <= 1000): # is it allowed to withdraw?\n # prioritizing 100-dollar bills\n qtt_100s = counter.how_many_100s(amount)\n remaining_cash = counter.remaining_cash_without_100s(amount)\n\n qtt_50s = counter.how_many_50s(remaining_cash)\n remaining_cash = counter.remaining_cash_without_50s(remaining_cash)\n\n qtt_20s = counter.how_many_20s(remaining_cash)\n remaining_cash = counter.remaining_cash_without_20s(remaining_cash)\n\n if counter.cash(qtt_100s, qtt_50s, qtt_20s) == amount:\n options.append([int(qtt_100s), int(qtt_50s), int(qtt_20s)])\n\n # prioritizing 50-dollar bills\n qtt_100s = 0\n\n qtt_50s = counter.how_many_50s(amount)\n remaining_cash = counter.remaining_cash_without_50s(amount)\n\n qtt_20s = counter.how_many_20s(remaining_cash)\n remaining_cash = counter.remaining_cash_without_20s(remaining_cash)\n\n if counter.cash(qtt_100s, qtt_50s, qtt_20s) == amount:\n if not(options[0] == [qtt_100s, qtt_50s, qtt_20s]):\n options.append([int(qtt_100s), int(qtt_50s), int(qtt_20s)])\n\n # prioritizing 20-dollar bills\n qtt_100s = 0\n\n qtt_50s = 0\n\n qtt_20s = counter.how_many_20s(amount)\n\n if counter.cash(qtt_100s, qtt_50s, qtt_20s) == amount:\n if not(options[0] == [qtt_100s, qtt_50s, qtt_20s]):\n if not(options[1] == [qtt_100s, qtt_50s, qtt_20s]):\n options.append([int(qtt_100s), int(qtt_50s), int(qtt_20s)])\n\n return options\n\n return None # if it wasn't allowed to withdraw" ]
[ "0.70608425", "0.7048668", "0.6820923", "0.6761242", "0.6565699", "0.6541074", "0.6474827", "0.646314", "0.64538455", "0.64456546", "0.6440168", "0.6427558", "0.6369602", "0.6368873", "0.63403964", "0.6331443", "0.62916195", "0.6288129", "0.6280676", "0.6277007", "0.62600404", "0.6253467", "0.6253282", "0.6253223", "0.6219313", "0.62143314", "0.6200765", "0.61979216", "0.6163818", "0.6140646" ]
0.7405286
0
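(Illustrative addition, not part of the dataset record above.) The give_item_and_change helper exercised by the test in this record is not shown; a minimal sketch of one implementation that would satisfy it follows — the stock table, prices, and the meaning of the third return value are assumptions, not the project's actual code.

# Hypothetical sketch only: the real give_item_and_change is not shown in this record.
STOCK = {'coke': 0.70, 'crisps': 0.85}   # assumed prices

def give_item_and_change(item_name, paid):
    """Return (item, change, stock); no item and a full refund when payment is short."""
    price = STOCK.get(item_name)
    if price is None or paid < price:
        # Unknown item or not enough money: dispense nothing, refund everything.
        return None, round(paid, 2), STOCK
    return item_name, round(paid - price, 2), STOCK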
only to be called for contextless purposes. in templates, use is_votable
def is_votable_slow(self):
    return self.is_votable and not self._current_user_vote
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_voters():", "def get_voters():", "def _vote(self, team):\r\n return True", "def can_vote(age):\n return age >= 18", "def can_view(self, user):\r\n return True", "def can_be_viewed_by(self,user):\n return True", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n review_request = context.get('review_request')\n user = request.user\n\n return (user.is_authenticated and\n review_request is not None and\n review_request.public and\n not is_site_read_only_for(user) and\n super().should_render(context=context))", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n review_request = context.get('review_request')\n user = request.user\n\n return (user.is_authenticated and\n review_request is not None and\n review_request.public and\n not is_site_read_only_for(user) and\n super().should_render(context=context))", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n review_request = context.get('review_request')\n\n return (super().should_render(context=context) and\n review_request is not None and\n review_request.public and\n not is_site_read_only_for(context['request'].user))", "def is_votable(source):\n if isinstance(source, str):\n source = os.path.expanduser(source)\n try:\n with iterparser.get_xml_iterator(source) as iterator:\n for start, tag, d, pos in iterator:\n if tag != \"xml\":\n return False\n break\n\n for start, tag, d, pos in iterator:\n if tag != \"VOTABLE\":\n return False\n break\n\n return True\n except ValueError:\n return False", "def get_viewable(self, user):\n return True", "def test_func(self):\n answer = self.get_object()\n return True if self.request.user == answer.author or self.request.user.is_superuser else False", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n review_request = context.get('review_request')\n perms = context.get('perms')\n user = request.user\n\n return (super().should_render(context=context) and\n review_request is not None and\n review_request.status == ReviewRequest.PENDING_REVIEW and\n not is_site_read_only_for(user) and\n (request.user.pk == review_request.submitter_id or\n (bool(perms) and\n perms['reviews']['can_change_status'] and\n review_request.public)))", "def is_review_permitted(self, user):\n if user.is_authenticated or settings.OSCAR_ALLOW_ANON_REVIEWS:\n return not self.has_review_by(user)\n else:\n return False", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n review_request = context.get('review_request')\n perms = context.get('perms')\n user = request.user\n\n return (super().should_render(context=context) and\n review_request is not None and\n review_request.status == ReviewRequest.PENDING_REVIEW and\n not is_site_read_only_for(user) and\n (user.pk == review_request.submitter_id or\n (bool(perms) and perms['reviews']['can_edit_reviewrequest'])))", "def is_lyrics_approved():", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n review_request = context.get('review_request')\n perms = context.get('perms')\n user = request.user\n\n return (super().should_render(context=context) and\n review_request is not None and\n review_request.status == ReviewRequest.PENDING_REVIEW and\n not is_site_read_only_for(user) and\n (user.pk == review_request.submitter_id or\n (bool(perms) and\n perms['reviews']['can_edit_reviewrequest'])))", "def should_render(\n self,\n context: Context,\n 
) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n unified_banner_feature.is_enabled(request=request))", "def is_reviewed(self, obj) -> bool: # pylint:disable=R0201\n return obj.profile.is_reviewed", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n not unified_banner_feature.is_enabled(request=request))", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n not unified_banner_feature.is_enabled(request=request))", "def test_func(self):\n post = self.get_object()\n\n return self.request.user == post.author", "def test_func(self):\n post = self.get_object()\n\n return self.request.user == post.author", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n general_comments_feature.is_enabled(request=request) and\n not unified_banner_feature.is_enabled(request=request))", "async def _me_pets(self, ctx):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n messages = ch.print_pets(ctx.user_object)\n await self.paginate(ctx, messages)", "def is_liked(value, user: User):\n return value.is_liked(user)", "def can_view_post(user):\n #only students and admins may use the search, submitForm functions\n return (not bool(user.is_staff) or user.is_superuser)", "def check_vulnerability(self):\n\t\tpass", "def test_func(self):\n taxonomy = self.get_object()\n return self.request.user == taxonomy.author", "def test_func(self):\n taxonomy = self.get_object()\n return self.request.user == taxonomy.author" ]
[ "0.6347413", "0.6347413", "0.62672406", "0.5919822", "0.5908369", "0.5898732", "0.5872455", "0.5872455", "0.5865792", "0.5843716", "0.5758735", "0.5755841", "0.57297236", "0.5581121", "0.5575616", "0.55574644", "0.55574566", "0.5542494", "0.5519472", "0.5515512", "0.5515512", "0.5491612", "0.5491612", "0.54914045", "0.54859704", "0.5483362", "0.54829276", "0.5480413", "0.54749787", "0.54749787" ]
0.6545112
0
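(Illustrative addition, not part of the dataset record above.) As a hedged usage sketch of is_votable_slow: the query states it is meant for contextless call sites, with templates using is_votable instead, so a caller outside a request might look like the following — the function name and iteration API are assumptions.

# Hypothetical contextless caller (e.g. a maintenance script) -- assumed, not from the record.
def count_votable_links(links):
    # Per the docstring, the template-facing is_votable is reserved for request
    # contexts, so this script calls the slower per-object variant instead.
    return sum(1 for link in links if link.is_votable_slow())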
returns the Vote object of this link for the specified user (or the currently logged in user if user is not specified) or None if the user has not voted on it yet
def _user_vote(self, user):
    from . import Vote
    if not user.is_authenticated:
        return None
    return (
        Vote.query
        .filter(Vote.type == 'links')
        .filter(Vote.user_id == user.id)
        .filter(Vote.thing_id == self.id)
        .first()
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_for_user(self, obj, user):\n if not user.is_authenticated:\n return None\n content_object = ContentType.objects.get_for_model(obj)\n try:\n vote = self.get(voter=user, content_type=content_object, object_id=obj._get_pk_val())\n\n except ObjectDoesNotExist:\n #print('No vote by {user} on {object}'.format(user=user, object=obj))\n return None\n\n return vote", "def get_for_user(self, obj, user):\n if not user.is_authenticated():\n return None\n ctype = ContentType.objects.get_for_model(obj)\n try:\n vote = self.get(content_type=ctype, object_id=obj._get_pk_val(),\n user=user)\n except models.ObjectDoesNotExist:\n vote = None\n return vote", "def get_for_user(self, obj, user):\r\n if not user.is_authenticated():\r\n return None\r\n ctype = ContentType.objects.get_for_model(obj)\r\n try:\r\n vote = self.get(content_type=ctype, object_id=obj._get_pk_val(),\r\n user=user)\r\n except models.ObjectDoesNotExist:\r\n vote = None\r\n return vote", "def get_rating_for_user(self, user, ip_address=None, cookies={}):\n kwargs = dict(\n content_type = self.get_content_type(),\n object_id = self.instance.pk,\n key = self.field.key,\n )\n\n if not (user and user.is_authenticated()):\n if not ip_address:\n raise ValueError('``user`` or ``ip_address`` must be present.')\n kwargs['user__isnull'] = True\n kwargs['ip_address'] = ip_address\n else:\n kwargs['user'] = user\n \n use_cookies = (self.field.allow_anonymous and self.field.use_cookies)\n if use_cookies:\n # TODO: move 'vote-%d.%d.%s' to settings or something\n cookie_name = 'vote-%d.%d.%s' % (kwargs['content_type'].pk, kwargs['object_id'], kwargs['key'][:6],) # -> md5_hexdigest?\n cookie = cookies.get(cookie_name)\n if cookie: \n kwargs['cookie'] = cookie\n else:\n kwargs['cookie__isnull'] = True\n \n try:\n rating = Vote.objects.get(**kwargs)\n return rating.score\n except Vote.MultipleObjectsReturned:\n pass\n except Vote.DoesNotExist:\n pass\n return", "def do_votes_by_user(parser, token):\r\n bits = token.contents.split()\r\n if len(bits) != 6:\r\n raise template.TemplateSyntaxError(\"'%s' tag takes exactly four arguments\" % bits[0])\r\n if bits[2] != 'on':\r\n raise template.TemplateSyntaxError(\"second argument to '%s' tag must be 'on'\" % bits[0])\r\n if bits[4] != 'as':\r\n raise template.TemplateSyntaxError(\"fourth argument to '%s' tag must be 'as'\" % bits[0])\r\n return VotesByUserNode(bits[1], bits[3], bits[5])", "def do_vote_by_user(parser, token):\r\n bits = token.contents.split()\r\n if len(bits) != 6:\r\n raise template.TemplateSyntaxError(\"'%s' tag takes exactly five arguments\" % bits[0])\r\n if bits[2] != 'on':\r\n raise template.TemplateSyntaxError(\"second argument to '%s' tag must be 'on'\" % bits[0])\r\n if bits[4] != 'as':\r\n raise template.TemplateSyntaxError(\"fourth argument to '%s' tag must be 'as'\" % bits[0])\r\n return VoteByUserNode(bits[1], bits[3], bits[5])", "def vote(self):\n if self.vote_exists():\n return self.update_vote()\n return self.create_vote()", "def get_vote(self, stats):\n if not isinstance(stats, Stats):\n raise TypeError\n return Vote.query.filter_by(user_id=self.id, stats_id=stats.id).first()", "def get_object(self, url_id, user_id):\n try:\n return Link.objects.get(id=url_id, user=user_id)\n except Link.DoesNotExist:\n return None", "def do_vote(self, stats, vote_val):\n vote = self.get_vote(stats)\n if vote is None:\n vote = Vote()\n vote.user = self\n vote.stats = stats\n vote.value = vote_val\n return vote", "def get_vote_value_for_object_parameter(obj, user, uuid = None, tpclass = 
None, name = None):\n t = type(obj)\n if t not in parameter_class_map:\n raise TypeError('type of the object must be model with parameters, not {0}'.format(t))\n\n valclass = parameter_class_map[t]['val']\n voteclass = parameter_class_map[t]['vote']\n q = Q(status='voted') & Q(parameter__obj=obj) & Q(**{'{0}__voter'.format(voteclass.__name__.lower()) : user})\n if isinstance(uuid, basestring):\n q &= Q(parameter__uuid = uuid)\n else:\n q &= Q(parameter__tpclass=tpclass)\n if tpclass == 'user':\n if not isinstance(name, basestring):\n raise Exception('name must be string if tpclass == \"user\"')\n q &= Q(parameter__name=name)\n try:\n ret = valclass.objects.filter(q).all()[0]\n except IndexError:\n return None\n return ret", "def get_user_votes(user_id: int) -> int:\n session = Session()\n\n # get user by id to ensure user exists\n get_user_by_id(user_id)\n # count votes for the user that haven't expired\n user_votes: int = session.query(Vote)\\\n .filter(Vote.user_id == user_id)\\\n .filter(Vote.vote_expiry > datetime.datetime.now()).count()\n\n session.close()\n\n return user_votes", "def _force_vote(self, user, value):\n previous = 0\n if value == 0:\n # Delete any previous vote object\n for v in Vote.objects.filter(user=user, content=self):\n previous = v.value\n v.delete()\n else:\n # Create or change vote object\n v, created = Vote.objects.get_or_create(user=user, content=self)\n previous = v.value\n v.value = value\n v.save(update_fields=['value'])\n return (previous-value)*(-1)", "def record_vote(self, obj, vote, user):\n if vote not in (+1, 0, -1):\n raise ValueError('Invalid vote (must be +1/0/-1)')\n content_type = ContentType.objects.get_for_model(obj)\n # First, try to fetch the instance of this row from DB\n # If that does not exist, then it is the first time we're creating it\n # If it does, then just update the previous one\n try:\n vote_obj = self.get(voter=user, content_type=content_type, object_id=obj._get_pk_val())\n if vote == 0 and not ZERO_VOTES_ALLOWED:\n vote_obj.delete()\n else:\n vote_obj.vote = vote\n vote_obj.save()\n\n except ObjectDoesNotExist:\n #This is the first time we're creating it\n try:\n if not ZERO_VOTES_ALLOWED and vote == 0:\n # This shouldn't be happening actually\n return\n vote_obj = self.create(voter=user, content_type=content_type, object_id=obj._get_pk_val(), vote=vote)\n except:\n print(( '{file}: something went wrong in creating a vote object at {line}'.format(file=str('__FILE__'), line=str('__LINE__'))))\n raise ObjectDoesNotExist\n\n return vote_obj", "def has_voted(self, user):\n return user.choice_set.filter(vote=self).exists()", "def get_object_with_user(self, user):\n try:\n uid = int(user)\n except TypeError:\n try:\n uid = user.id \n except:\n return None\n try:\n return self.get(db_player__user__id=uid)\n except Exception:\n return None", "def vote(request, model, object_id):\n if request.method != 'POST':\n raise Http404\n\n vote_type = request.POST.get('type', None)\n if vote_type == 'up' and auth.can_vote_up(request.user):\n vote_type = Vote.VOTE_UP\n elif vote_type == 'down' and auth.can_vote_down(request.user):\n vote_type = Vote.VOTE_DOWN\n else:\n raise Http404\n\n # TODO Ensure users can't vote on their own posts\n\n obj = get_object_or_404(model, id=object_id, deleted=False, locked=False)\n content_type = ContentType.objects.get_for_model(model)\n try:\n existing_vote = Vote.objects.get(content_type=content_type,\n object_id=object_id,\n user=request.user)\n except Vote.DoesNotExist:\n existing_vote = None\n\n if 
existing_vote is None:\n Vote.objects.create(content_type=content_type,\n object_id=object_id,\n user=request.user,\n vote=vote_type)\n else:\n if vote_type == existing_vote.vote:\n existing_vote.delete()\n else:\n existing_vote.vote = vote_type\n existing_vote.save()\n\n # TODO Reputation management\n\n if request.is_ajax():\n return JsonResponse({\n 'success': True,\n 'score': model._default_manager.filter(\n id=object_id).values_list('score', flat=True)[0],\n })\n else:\n return HttpResponseRedirect(obj.get_absolute_url())", "def get_object(self):\n return self.request.user", "def get_object(self):\n return self.request.user", "def get_object(self):\n return self.request.user", "def get_object(self):\n return self.request.user", "def get_object(self):\n return self.request.user", "def get_object(self):\n try:\n self.object = User.objects.get(username= self.request.user)\n print(self.object)\n return self.object\n except:\n return None", "def get_object(self):\n try:\n self.object = User.objects.get(username= self.request.user)\n print(self.object)\n return self.object\n except:\n return None", "def get_object(self):\n return get_object_or_404(User, pk__iexact=self.request.user.id)", "def get_object(self):\n\n return self.request.user", "def get_object(self, queryset=None):\n return self.request.user", "def get_object(self):\n return get_object_or_404(User, id__iexact=self.request.user.id)", "def get_object(self):\n return get_object_or_404(User, id__iexact=self.request.user.id)", "def vote(self, request, **kwargs):\n context = self.get_context_data(**kwargs)\n _logger.info(\"%s is trying to vote on %s\", request.user, context['song'])\n vote_dict = get_vote_dict(request.user)\n can_vote = context['song'].id not in vote_dict[request.user.id] and context['song'].ready\n if can_vote:\n vote = Vote()\n vote.user = request.user\n vote.song = context['song']\n vote.save()\n vote_dict[request.user.id].append(context['song'].id)\n cache.set('vote_dict', vote_dict)\n logging.info('%s voted on %s.', request.user, context['song'])\n return HttpResponse('Vote registered on %s.' % context['song'])\n else:\n logging.info('%s tried to vote more than once on %s.', request.user.username, context['song'])\n return HttpResponse(\"Du har allerede stemt på denne sangen i dag!\", content_type='text/plain', status=403)" ]
[ "0.7796545", "0.7590941", "0.75795954", "0.62007797", "0.6159514", "0.6088877", "0.5955817", "0.5776241", "0.57702315", "0.57532567", "0.56499624", "0.55581504", "0.5533404", "0.55273354", "0.5515895", "0.5509523", "0.54279643", "0.5415073", "0.5415073", "0.5415073", "0.5415073", "0.5415073", "0.53768915", "0.53768915", "0.5375505", "0.5372549", "0.53616303", "0.529747", "0.529747", "0.5278135" ]
0.8448549
0
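(Illustrative addition, not part of the dataset record above.) A small hedged sketch of how a caller might consume the result of _user_vote — the vote_type attribute and the helper name are assumptions; only the None-versus-Vote return convention comes from the record.

# Hypothetical caller -- attribute and function names beyond _user_vote are assumed.
def vote_state_for(link, user):
    vote = link._user_vote(user)
    if vote is None:
        return 'none'                                  # user has not voted on this link yet
    return 'up' if vote.vote_type > 0 else 'down'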
Gets general annotation data for a dataset. Basically everything that the annotation Web UI needs, like classes and their colors, which keys to press for each class, and progress.
def annotation_data(dataset_name):
    try:
        classnames = get_classnames(dataset_name)
    except FileNotFoundError:
        return None
    else:
        # Removing R from this would not be sufficient, and would look like a bug
        all_keys = 'abcdefghijklmnopqrstuvwxyz'

        colors = class_colors(len(classnames))
        out_colors = {}
        for cn, cc in zip(classnames, colors):
            out_colors[cn] = to_hex(cc)

        keys_list = []
        keys = set()
        for cn in classnames:
            cn = cn.lower()
            success = False
            for letter in cn:
                if not (letter in keys):
                    if not letter == 'r':  # 'R' is not okay, since that button is used for cancelling
                        keys_list.append(letter)
                        keys.add(letter)
                        success = True
                        break

            while not success:
                letter = choice(all_keys)
                if not (letter in keys):
                    if not letter == 'r':  # 'R' is not okay, since that button is used for cancelling
                        keys.add(letter)
                        keys_list.append(letter)
                        success = True

        key_codes_list = [ord(x)-32 for x in keys_list]  # convert to nonsense Javascript keyCodes
        keys_list = [x.upper() for x in keys_list]  # are nicer visualized in upper case in the Web UI

        train_stats = get_annotation_stats(dataset_name, 'train')
        test_stats = get_annotation_stats(dataset_name, 'test')

        out = [classnames, out_colors, keys_list, key_codes_list, train_stats, test_stats]
        return out
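(Illustrative addition, not part of the dataset record above.) A hedged usage sketch for the annotation_data function: the Flask endpoint, route, and JSON field names are assumptions — only the six-element return list and the None-on-missing-dataset behaviour come from the code above.

# Hypothetical Web UI endpoint -- not part of the original record.
from flask import Flask, jsonify, abort

app = Flask(__name__)

@app.route('/annotation_data/<dataset_name>')
def annotation_data_endpoint(dataset_name):
    data = annotation_data(dataset_name)
    if data is None:  # get_classnames raised FileNotFoundError for an unknown dataset
        abort(404)
    classnames, colors, keys, key_codes, train_stats, test_stats = data
    return jsonify({
        'classnames': classnames,
        'colors': colors,          # class name -> hex color
        'keys': keys,              # upper-case key labels shown in the UI
        'key_codes': key_codes,    # matching JavaScript keyCodes
        'train_stats': train_stats,
        'test_stats': test_stats,
    })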
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_annotations(self) -> List[Dict[int, Dict[str, Any]]]:\n annotations = []\n for item in self.collector:\n data_file_type = os.path.basename(item).split(\".\")[-1]\n annotations.append(\n load_annotation_file(\n os.path.join(\n self.annotation_folder,\n os.path.basename(item).replace(data_file_type, \"json\"),\n )\n )\n )\n\n return annotations", "def ds_atags(self):\n atags = {\n 'unit': {\n 'atname': 'unit',\n 'data_type': 'text',\n 'description': 'Unit of measure for values in data'},\n 'description': {\n 'atname': 'description',\n 'data_type': 'text',\n 'description': 'Human readable description of data'},\n 'comments': {\n 'atname': 'comments',\n 'data_type': 'text',\n 'description': 'Comments about the data set'},\n 'references': {\n 'atname': 'references',\n 'data_type': 'text',\n 'description': 'path to group, diminsion index or field being referenced'},\n 'semantic_type': {\n 'atname': 'semantic_type',\n 'data_type': 'text',\n 'description': 'Semantic type of data stored'},\n 'scale': {\n 'atname': 'conversion',\n 'data_type': 'float',\n 'description': 'Scale factor to convert stored values to units of measure'},\n }\n return atags", "def get_dataset(reader: DataReader):\n\n xs = []\n ys = []\n\n for annotation_sentences in reader.annotations:\n for annotation in annotation_sentences:\n xs.append([annotation.fee_raw] + annotation.sentence)\n ys.append(annotation.frame)\n\n return xs, ys", "def __call__(self):\n\n dataset = TextOnlyCocoAnnotation()\n\n with open(self.path) as read_file:\n\n json_loaded = json.load(read_file)\n\n for i, value in tqdm(json_loaded['imgs'].items()):\n image_path = os.path.join(os.path.dirname(self.path), 'train2014',\n value['file_name'])\n dataset_type = value['set']\n\n if dataset_type not in self.sets:\n print(dataset_type)\n continue\n\n for annotation_id in json_loaded['imgToAnns'][i]:\n annotation_value = json_loaded['anns'][str(annotation_id)]\n word_annotation = self.parse_annotation_instance(annotation_value)\n dataset.add_bbox(image_path, imagesize.get(image_path), word_annotation)\n\n return dataset", "def get_annotations(data_folder):\n annotations_files = os.listdir('data_annotations')\n\n annotations = {}\n for file_name in annotations_files:\n annotation = json.load(\n open(f'{data_folder}/annotations/{file_name}', 'r')\n )\n key = int(annotation['entity_id'])\n annotations[key] = annotation['value']['value']\n\n return annotations", "def _get_annotation_data_attr(self, index, el):\r\n\r\n data_attrs = {}\r\n attrs_map = {\r\n 'body': 'data-comment-body',\r\n 'title': 'data-comment-title',\r\n 'problem': 'data-problem-id'\r\n }\r\n\r\n for xml_key in attrs_map.keys():\r\n if xml_key in el.attrib:\r\n value = el.get(xml_key, '')\r\n html_key = attrs_map[xml_key]\r\n data_attrs[html_key] = {'value': value, '_delete': xml_key}\r\n\r\n return data_attrs", "def _annotations(request):\n result = Search(request).run(MultiDict(request.params))\n\n return request.find_service(AnnotationReadService).get_annotations_by_id(\n ids=result.annotation_ids\n )", "def get_iiai_dataset():\n ds = AttrDict()\n \n # classes = [\n # '__background__','Planes', 'Civilian Passenger', 'Civ Jet', 'Civ Light Aircraft', \n # 'Civ Transport', 'Mil Bomber', 'Mil Fighter', 'Mil Transport', 'Plane Engine', \n # 'Ships', 'Destroyer', 'Frigate', 'Cruiser', 'Aircraft Carrier', 'Cargo Ship', \n # 'Boats', 'Submarines', 'Sailing Ships/Boats', 'Tanker', 'Helicopter', 'Civilian', \n # 'Military', 'Vehicles', 'Cars', 'Pickup Trucks', 'Motorcycles', 'Semi Truck', 'Bus', 
\n # 'Ambulance', 'Fire Truck', 'Bridges', 'Pedestrian', 'Buildings', 'Mosques', 'Towers', \n # 'Residential', 'Other', 'Greenhouse', 'Warehouses', 'Parking Lots', 'Air Traffic Control Tower', \n # 'Runway', 'Hangar', 'Taxiways', 'Aprons', 'Helipad', 'Satellite Dish', 'Solar Panels', 'Storage Tank', \n # 'Roundabout', 'Swimming Pool', 'Sports Stadium/Field', 'Tennis Court', 'Basketball Court', \n # 'Soccer Field', 'Baseball Diamond', 'Stadium', 'Athletic Track', 'Rail(train)', 'Intersection/Crossroads', \n # 'Shipping Container Lot', 'Shipping Containers', 'Crane', 'Construction', 'Floating', 'Gantry', 'Tower', \n # 'Train', 'Engine', 'Boxcar', 'Passenger Car', 'Flatbed Car', 'Hopper', 'Tanker', 'Breakwater', 'Pier', \n # 'Quay', 'Harbor', 'Drydocks', 'Floating Docks', 'Slip', 'Telephone Poles', 'Hovercraft', 'Mil Vehicles', \n # 'Tanks', 'Self-propelled Artillery', 'Towed Artillery', 'APC', 'Fighting Veh.', 'Support Vehicles', \n # 'Missiles/Missile Systems', 'Comms Towers', 'Power Station'\n # ]\n\n # classes = ['__background__',\n # 'Planes', \n # 'Ships', \n # 'Helicopter', \n # 'Vehicles', \n # 'Bridges', \n # 'Pedestrian', \n # 'Buildings', \n # 'Parking Lots', \n # 'Airports', \n # 'Satellite Dish', \n # 'Solar Panels', \n # 'Storage Tank', \n # 'Roundabout', \n # 'Swimming Pool',\n # 'Sports Stadium/Field',\n # 'Rail(train)', \n # 'Intersection/Crossroads', \n # 'Shipping Container Lot', \n # 'Shipping Containers', \n # 'Crane',\n # 'Train', \n # 'Port' ,\n # 'Telephone Poles', \n # 'Hovercraft', \n # 'Mil Vehicles',\n # 'Missiles/Missile Systems', \n # 'Comms Towers', \n # 'Power Station'\n # ]\n\n # classes = ['__background__', \n # 'Planes', \n # 'Ships', \n # 'Helicopter', \n # 'Vehicles', \n # 'Bridges', \n # 'Buildings', \n # 'Parking Lots', \n # 'Satellite Dish', \n # 'Solar Panels', \n # 'Storage Tank', \n # 'Swimming Pool', \n # 'Sports Stadium/Field', \n # 'Shipping Containers', \n # 'Crane', 'Train', \n # 'Mil Vehicles', \n # 'Missiles/Missile Systems', \n # 'Comms Towers']\n\n #12\n classes = ['__background__', \n 'Planes', \n 'Ships', \n 'Helicopter', \n 'Vehicles', \n 'Buildings', \n 'Parking Lots', \n 'Storage Tank', \n 'Swimming Pool', \n 'Sports Stadium/Field', \n 'Shipping Containers', \n 'Crane', \n 'Comms Towers']\n\n\n\n\n\n\n ds.classes = {i: name for i, name in enumerate(classes)}\n return ds", "def get_overview_annotations() -> dict:\n return {}", "def _get_data(filename: str, image_path: str, annotation_path: str) -> Data:\n data = Data(os.path.join(image_path, f\"{filename}.jpg\"))\n box2d = []\n with open(os.path.join(annotation_path, f\"{filename}.xml\"), \"r\", encoding=\"utf-8\") as fp:\n objects = xmltodict.parse(fp.read())[\"annotation\"][\"object\"]\n if not isinstance(objects, list):\n objects = [objects]\n for obj in objects:\n attributes = {attribute: bool(int(obj[attribute])) for attribute in _BOOLEAN_ATTRIBUTES}\n attributes[\"pose\"] = obj[\"pose\"]\n bndbox = obj[\"bndbox\"]\n box2d.append(\n LabeledBox2D(\n float(bndbox[\"xmin\"]),\n float(bndbox[\"ymin\"]),\n float(bndbox[\"xmax\"]),\n float(bndbox[\"ymax\"]),\n category=obj[\"name\"],\n attributes=attributes,\n )\n )\n data.label.box2d = box2d\n return data", "def _load_annotations(self):\n if self._raw_annotations is not None:\n return self._raw_annotations\n\n dataset_file = os.path.join(self._annotation_path, 'complete_dataset_v{}.pkl'.format(self._version))\n idx_file = os.path.join(self._annotation_path, 'splits_indices_v{}.pkl'.format(self._version))\n\n def get_split_from_ds(ds, 
idx):\n split = {}\n keys = sorted(ds.keys())\n for j in xrange(len(idx)):\n k = keys[idx[j]]\n split[k] = ds[k]\n return split\n\n with open(idx_file, 'rb') as fid:\n indices = cPickle.load(fid)[self._image_set]\n with open(dataset_file, 'rb') as fid:\n ds = cPickle.load(fid)\n self._raw_annotations = get_split_from_ds(ds, indices)\n\n return self._raw_annotations", "def annotations(self) -> pulumi.Output[Mapping[str, Any]]:\n return pulumi.get(self, \"annotations\")", "def _info(self) -> tfds.core.DatasetInfo:\n features = tfds.features.FeaturesDict({\n \"tokens\":\n tfds.features.Sequence(tfds.features.Text()),\n \"tags\":\n tfds.features.Sequence(\n tfds.features.ClassLabel(names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ])),\n \"langs\":\n tfds.features.Sequence(tfds.features.Text()),\n \"spans\":\n tfds.features.Sequence(tfds.features.Text()),\n })\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=features,\n supervised_keys=None,\n homepage=\"https://github.com/afshinrahimi/mmner\",\n citation=_CITATION,\n )", "def draw_dataset_dict(self, dic):\n annos = dic.get(\"annotations\", None)\n if annos:\n if \"segmentation\" in annos[0]:\n masks = [x[\"segmentation\"] for x in annos]\n else:\n masks = None\n if \"keypoints\" in annos[0]:\n keypts = [x[\"keypoints\"] for x in annos]\n keypts = np.array(keypts).reshape(len(annos), -1, 3)\n else:\n keypts = None\n\n boxes = [\n BoxMode.convert(x[\"bbox\"], x[\"bbox_mode\"], BoxMode.XYXY_ABS) if len(x[\"bbox\"]) == 4 else x[\"bbox\"]\n for x in annos\n ]\n\n colors = None\n category_ids = [x[\"category_id\"] for x in annos]\n if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get(\"thing_colors\"):\n colors = [self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids]\n names = self.metadata.get(\"thing_classes\", None)\n labels = None\n labels = _create_text_labels(\n category_ids,\n scores=None,\n class_names=[\"PB\"],\n # is_crowd=[x.get(\"iscrowd\", 0) for x in annos],\n )\n boxes = None\n alpha = 0\n # colors=[(1.0,0.0,0.0)]*99\n # colors=['c'] * 99\n linestyle='dashed'\n self.overlay_instances(\n labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors, alpha=alpha,linestyle='dashed'\n )\n\n sem_seg = dic.get(\"sem_seg\", None)\n if sem_seg is None and \"sem_seg_file_name\" in dic:\n with PathManager.open(dic[\"sem_seg_file_name\"], \"rb\") as f:\n sem_seg = Image.open(f)\n sem_seg = np.asarray(sem_seg, dtype=\"uint8\")\n if sem_seg is not None:\n self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5)\n\n pan_seg = dic.get(\"pan_seg\", None)\n if pan_seg is None and \"pan_seg_file_name\" in dic:\n with PathManager.open(dic[\"pan_seg_file_name\"], \"rb\") as f:\n pan_seg = Image.open(f)\n pan_seg = np.asarray(pan_seg)\n from panopticapi.utils import rgb2id\n\n pan_seg = rgb2id(pan_seg)\n if pan_seg is not None:\n segments_info = dic[\"segments_info\"]\n pan_seg = torch.Tensor(pan_seg)\n self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5)\n return self.output", "def get_annotations(self, img_id):\n return self._img_id2annotations.get(img_id, [])", "def __call__(self, *args, **kwargs):\n\n dataset = TextOnlyCocoAnnotation()\n\n if self.subset == 'train':\n image_paths, annotation_paths = self.collect_train_paths()\n elif self.subset == 'val':\n image_paths, annotation_paths = self.collect_val_paths()\n\n for image_path, annotation_path in tqdm(zip(image_paths, 
annotation_paths)):\n word_annotations = []\n with open(annotation_path, encoding='utf-8-sig') as read_file:\n content = [line.strip() for line in read_file.readlines()]\n for line in content:\n word_annotations.append(self.parse_line(line))\n should_add = not self.is_latin_required\n if self.is_latin_required:\n for word_annotation in word_annotations:\n if word_annotation['attributes']['language'].lower() == 'latin':\n should_add = True\n break\n if should_add:\n for word_annotation in word_annotations:\n dataset.add_bbox(image_path, imagesize.get(image_path), word_annotation)\n\n return dataset", "def group_annotation_by_class(dataset):\n\ttrue_case_stat = {}\n\tall_gt_boxes = {}\n\tfor i in range(len(dataset)):\n\t\timage_id, annotation = dataset.get_annotation(i)\n\t\tgt_boxes, classes = annotation\n\t\tgt_boxes = torch.from_numpy(gt_boxes)\n\t\tfor i in range(0,len(classes)):\n\t\t\tclass_index = int(classes[i])\n\t\t\tgt_box = gt_boxes[i]\n\t\t\ttrue_case_stat[class_index] = true_case_stat.get(class_index, 0) + 1\n\t\t\tif class_index not in all_gt_boxes:\n\t\t\t\tall_gt_boxes[class_index] = {}\n\t\t\tif image_id not in all_gt_boxes[class_index]:\n\t\t\t\tall_gt_boxes[class_index][image_id] = []\n\t\t\tall_gt_boxes[class_index][image_id].append(gt_box)\n\n\tfor class_index in all_gt_boxes:\n\t\tfor image_id in all_gt_boxes[class_index]:\n\t\t\tall_gt_boxes[class_index][image_id] = torch.stack(all_gt_boxes[class_index][image_id])\n\treturn true_case_stat, all_gt_boxes", "def get_annotations(self):\n ann = wfdb.rdann(self.patient_number, 'atr', pb_dir='mitdb', return_label_elements=['symbol', 'label_store',\n 'description'],\n summarize_labels=True)\n\n mit_bih_labels_str = ann.symbol\n\n labels_locations = ann.sample\n\n labels_description = ann.description\n\n return mit_bih_labels_str, labels_locations, labels_description", "def _info(self) -> tfds.core.DatasetInfo:\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n # These are the features of your dataset like images, labels ...\n 'image': tfds.features.Image(shape=(None, None, 1)),\n 'bboxes': tfds.features.Sequence({'bbox': tfds.features.BBoxFeature()}),\n 'image_id': tfds.features.Text(),\n 'series_id': tfds.features.Text(),\n 'study_id': tfds.features.Text(),\n 'category': tfds.features.ClassLabel(names=['negative', 'typical', 'atypical', 'indeterminate'])\n }),\n supervised_keys=('image', 'category'),\n homepage='https://dataset-homepage/',\n citation=_CITATION,\n )", "def datasets(self):\n return [Dataset.GWAS_CATALOG, Dataset.CLINVAR, Dataset.EFO]", "def load_data(self, annotation_json, images_dir):\r\n # Load json from file\r\n json_file = open(annotation_json)\r\n coco_json = json.load(json_file)\r\n json_file.close()\r\n \r\n # Add the class names using the base method from utils.Dataset\r\n source_name = \"coco_like\"\r\n ids={}\r\n i=0\r\n for category in coco_json['categories']:\r\n i+=1\r\n class_id = category['id']\r\n ids[class_id]=i\r\n class_name = category['name']\r\n if class_id < 1:\r\n print('Error: Class id for \"{}\" cannot be less than one. 
(0 is reserved for the background)'.format(class_name))\r\n return\r\n \r\n self.add_class(source_name, class_id, class_name)\r\n for annotation in coco_json['annotations']:\r\n annotation[\"category_id\"]=ids[annotation[\"category_id\"]]\r\n \r\n # Get all annotations\r\n \r\n annotations = {}\r\n for annotation in coco_json['annotations']:\r\n image_id = annotation['image_id']\r\n if image_id not in annotations:\r\n annotations[image_id] = []\r\n annotations[image_id].append(annotation)\r\n \r\n # Get all images and add them to the dataset\r\n seen_images = {}\r\n for image in coco_json['images']:\r\n image_id = image['id']\r\n if image_id in seen_images:\r\n print(\"Warning: Skipping duplicate image id: {}\".format(image))\r\n else:\r\n seen_images[image_id] = image\r\n try:\r\n image_file_name = image['file_name']\r\n image_width = image['width']\r\n image_height = image['height']\r\n except KeyError as key:\r\n print(\"Warning: Skipping image (id: {}) with missing key: {}\".format(image_id, key))\r\n \r\n image_path = os.path.abspath(os.path.join(images_dir, image_file_name))\r\n image_annotations = annotations[image_id]\r\n \r\n # Add the image using the base method from utils.Dataset\r\n self.add_image(\r\n source=source_name,\r\n image_id=image_id,\r\n path=image_path,\r\n width=image_width,\r\n height=image_height,\r\n annotations=image_annotations\r\n )", "def _info(self) -> tfds.core.DatasetInfo:\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n citation=_CITATION,\n features=tfds.features.FeaturesDict({\n 'image': tfds.features.Image(shape=(None, None, 3)),\n 'label': tfds.features.ClassLabel(names=_CLASS_NAMES),\n }),\n homepage=_HOMEPAGE,\n supervised_keys=('image', 'label'),\n )", "def _info(self) -> tfds.core.DatasetInfo:\n # TODO(kappatng): Specifies the tfds.core.DatasetInfo object\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n # These are the features of your dataset like images, labels ...\n \"image\": tfds.features.Tensor(shape=[41, 41], dtype=tf.float32),\n \"psf\": tfds.features.Tensor(shape=[41, 41], dtype=tf.float32),\n \"variance\": tfds.features.Tensor(shape=[41, 41], dtype=tf.float32),\n \"mask\": tfds.features.Tensor(shape=[41, 41], dtype=tf.int32),\n\t}),\n # If there's a common (input, target) tuple from the\n # features, specify them here. 
They'll be used if\n # `as_supervised=True` in `builder.as_dataset`.\n supervised_keys=(\"image\", \"image\"),\n homepage='https://dataset-homepage/',\n citation=_CITATION,\n )", "def extract_annotations(chosen_classes, annotations_dir, dataset_dir, val_set, train_set): \n extension = ''\n annotations_train = []\n annotations_val = []\n class_names = set()\n for xml_file in [f for f in os.listdir(annotations_dir) if f.endswith(\".xml\")]:\n tree = ET.parse(os.path.join(annotations_dir, xml_file))\n root = tree.getroot()\n file_name = None\n image = xml_file[:-3] \n try: \n extension = [image_file for image_file in os.listdir(dataset_dir) if image_file.split('.')[0]==image[:-1]][0].split('.')[1]\n \n if os.path.exists(dataset_dir + image+extension):\n for elem in root:\n if elem.tag == 'filename':\n file_name = os.path.join(dataset_dir, image+extension)\n if elem.tag == 'object':\n obj_name = None\n coords = []\n for subelem in elem:\n if subelem.tag == 'name':\n obj_name = subelem.text \n if subelem.tag == 'bndbox':\n for subsubelem in subelem:\n coords.append(subsubelem.text)\n item = [file_name] + coords + [obj_name]\n if obj_name in chosen_classes:\n class_names.add(obj_name)\n if xml_file in val_set:\n annotations_val.append(item)\n elif xml_file in train_set:\n annotations_train.append(item)\n except:\n print(\"Image file for xml with name: \", xml_file, \" does not seem to exist!\")\n return annotations_train, annotations_val, class_names", "def _basic_data_info(X, y):\n num_samples, num_feats = X.shape # start with X properties\n\n # Compute distribution\n classes, counts, percs = _class_distribution(y)\n num_classes = classes.size\n\n # Return data info dictionary\n output_dic = {\n \"Num_samples\": num_samples,\n \"Num_feats\": num_feats,\n \"Num_classes\": num_classes,\n \"classes\": classes,\n \"counts\": counts,\n \"percs\": percs\n }\n\n return output_dic", "def __call__(self, *args, **kwargs):\n\n dataset = TextOnlyCocoAnnotation()\n\n for image_name in tqdm(sorted(os.listdir(self.folder))):\n if image_name.endswith('JPG'):\n image_path = os.path.join(self.folder, image_name)\n annotation_path = os.path.join(self.folder, image_name.replace('.JPG', '.gt'))\n\n with open(annotation_path, encoding='utf-8-sig') as read_file:\n content = [line.strip() for line in read_file.readlines()]\n for line in content:\n dataset.add_bbox(image_path, imagesize.get(image_path),\n self.parse_line(line))\n\n return dataset", "def fetch_annotation(self):\n annotation = self.dataset.annotation\n\n # Fetch filters for the dataset's alignment dimension from this query builder\n dataset_alignment_dimension_filters = self.fetch_query_filters(\n annotation.dataset_alignment_field_alias\n )\n\n # Update fields in filters for the dataset's alignment dimension to the annotation's alignment field\n annotation_alignment_dimension_filters = [\n dataset_alignment_filter.for_(annotation.alignment_field)\n for dataset_alignment_filter in dataset_alignment_dimension_filters\n ]\n\n annotation_alignment_field = annotation.alignment_field\n if annotation_alignment_field.data_type == DataType.date:\n dataset_alignment_dimension = self.fetch_query_dimension(\n annotation.dataset_alignment_field_alias\n )\n\n if hasattr(dataset_alignment_dimension, \"interval_key\"):\n # Use the interval key of the dataset's alignment dimension for the annotation's alignment field\n # Otherwise we would need to copy it to prevent issues from patching directly\n annotation_alignment_field = DatetimeInterval(\n 
annotation.alignment_field, dataset_alignment_dimension.interval_key\n )\n\n annotation_dimensions = [annotation_alignment_field, annotation.field]\n\n annotation_query = make_slicer_query(\n database=self.dataset.database,\n base_table=annotation.table,\n dimensions=annotation_dimensions,\n filters=annotation_alignment_dimension_filters,\n )\n\n annotation_df = fetch_data(\n self.dataset.database, [annotation_query], [annotation.alignment_field]\n )\n\n return annotation_df", "def _get_annotation(self, image_id):\n annotation_file = self.image_sets_dir / f'{image_id}.xml'\n objects = ET.parse(annotation_file).findall('object')\n boxes = []\n labels = []\n is_difficult = []\n for obj in objects:\n class_name = obj.find('name').text.lower().strip()\n if class_name in self.class_dict:\n bbox = obj.find('bndbox')\n\n x0 = float(bbox.find('xmin').text) - 1\n y0 = float(bbox.find('ymin').text) - 1\n x1 = float(bbox.find('xmax').text) - 1\n y1 = float(bbox.find('ymax').text) - 1\n boxes.append([x0, y0, x1, y1])\n\n labels.append(self.class_dict[class_name])\n\n is_difficult_str = obj.find('difficult').text\n is_difficult.append(int(is_difficult_str) if is_difficult_str else 0)\n\n return (np.array(boxes, dtype=np.float32),\n np.array(labels, dtype=np.int64),\n np.array(is_difficult, dtype=np.uint8))", "def __call__(self, *args, **kwargs):\n\n dataset = TextOnlyCocoAnnotation()\n\n n_images = 1000 if self.is_train else 500\n for i in tqdm(range(1, n_images + 1)):\n image_path = os.path.join(self.images_folder, 'img_{}.jpg'.format(i))\n annotation_path = os.path.join(self.annotations_folder, 'gt_img_{}.txt'.format(i))\n\n with open(annotation_path, encoding='utf-8-sig') as read_file:\n content = [line.strip() for line in read_file.readlines()]\n for line in content:\n dataset.add_bbox(image_path, imagesize.get(image_path), self.parse_line(line))\n\n return dataset", "def _parse_anno_info(self, annotations):\n gt_bboxes, gt_bboxes_ignore = [], []\n gt_masks, gt_masks_ignore = [], []\n gt_labels = []\n for ann in annotations:\n if ann.get('iscrowd', False):\n gt_bboxes_ignore.append(ann['bbox'])\n gt_masks_ignore.append(ann.get('segmentation', None))\n else:\n gt_bboxes.append(ann['bbox'])\n gt_labels.append(ann['category_id'])\n gt_masks.append(ann.get('segmentation', None))\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n ann = dict(\n bboxes=gt_bboxes,\n labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore,\n masks_ignore=gt_masks_ignore,\n masks=gt_masks)\n\n return ann" ]
[ "0.6100371", "0.5930464", "0.58168316", "0.57694376", "0.57194066", "0.56618434", "0.56347775", "0.5609098", "0.55929613", "0.55875194", "0.5569791", "0.55386776", "0.55345094", "0.55141187", "0.55112606", "0.55028033", "0.5489555", "0.54789513", "0.54480016", "0.5446405", "0.54447186", "0.54440874", "0.54371035", "0.54326636", "0.5409207", "0.5403474", "0.54007626", "0.53983456", "0.5395605", "0.5389025" ]
0.7287963
0
Calculate the magnitude of a 3d vector
def vector_3d_magnitude(x, y, z): return math.sqrt((x * x) + (y * y) + (z * z))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def magnitude(v):\n if len(v.arr) != 4 or v[3] != 0.0:\n raise ValueError(\"Only use this function with vectors.\")\n return np.sqrt(np.sum(np.square(v.arr)))", "def vector_magnitude(v):\n\n v = np.atleast_2d(v)\n\n return np.sqrt((v**2).sum(axis=1))", "def magnitude( vectors ):\n vectors = asarray( vectors, _aformat(vectors))\n if not (len(shape(vectors))==2 and shape(vectors)[1] in (3,4)):\n vectors = reshape( vectors, (-1,3))\n result = sum(vectors*vectors,1 ) # index 1\n sqrt( result, result )\n return result", "def magnitude(v: Vector) -> float:\n return math.sqrt(sum_of_squares(v)) #math.sqrt() is a square root function", "def magnitude(v: Vector) -> float:\n return math.sqrt(sum_of_squares(v))", "def magnitude(vector:tuple)->float:\n return math.sqrt(vector[0] ** 2 + vector[1] ** 2)", "def vector_magnitude(vec, axis=None):\n return (vec * vec).sum(axis=axis) ** 0.5", "def magnitude(vector):\n return math.sqrt(sum_of_squares(vector))", "def magnitude_of_vector(v):\n return math.sqrt(sum_of_squares(v))", "def magnitude(self):\n return math.sqrt(self.x**2 + self.y**2 + self.z**2)", "def magnitude(self): # @todo @caution check: something wrong?\n\n return (math.sqrt(reduce(lambda x, y: x+y,\n [x**2 for x in self.vector])))", "def magnitude(v):\n\treturn math.sqrt(sum_squares(v))", "def magnitude(complex_vec, axis=None):\n cv_mag_vector = complex_magnitude(complex_vec)\n return vector_magnitude(cv_mag_vector, axis=axis)", "def magnitude_vect(vect):\n mag = (vect[0] ** 2 + vect[1] ** 2 + vect[2] ** 2) ** .5\n return mag", "def magnitude(v):\n return math.sqrt(sum_of_squares(v))", "def magnitude(*args):\r\n return sqrt(dot(args, args))", "def magnitude(self):\n\t\treturn sqrt(self.dot(self))", "def magni(vector):\n return(np.linalg.norm(vector))", "def norm3d(self) -> float:\n\n return self.v3ddict.norm3d()", "def calculate_vector_magnitude(self, vector):\n magnitude_squared = 0\n for d in vector:\n magnitude_squared += pow(d, 2)\n \n magnitude = pow(magnitude_squared, .5)\n\n return magnitude", "def magnitude(X):\r\n r = np.real(X)\r\n i = np.imag(X)\r\n return np.sqrt(r * r + i * i);", "def magnitude(a):\n return dot_product(a, a)**0.5", "def magnitude(self):\n return self.real ** 2 + numpy.inner(self.pure, self.pure)", "def vector_length(self, x: float, y: float, z: float) -> float:\n A = 2.0 * (x * y * self.aga + x * z * self.bbe + y * z * self.cal)\n return sqrt(x ** 2 * self.asq + y ** 2 * self.bsq + z ** 2 * self.csq + A)", "def test_magnitude(self):\n\n a1 = vectors.Vector(1, 2, 3)\n self.assertEqual(a1.magnitude(), math.sqrt(14))\n\n a1 = vectors.Vector(-1, -2, -3)\n self.assertEqual(a1.magnitude(), math.sqrt(14))\n\n a1 = vectors.Vector(1, 0, 0)\n self.assertEqual(a1.magnitude(), 1)\n\n a1 = vectors.Vector(0, 1, 0)\n self.assertEqual(a1.magnitude(), 1)\n\n a1 = vectors.Vector(0, 0, 1)\n self.assertEqual(a1.magnitude(), 1)", "def magnitude(pos):\n x, y = pos\n return x * x + y * y", "def magnitude(point_a, point_b):\n vector = np.subtract(point_a, point_b)\n total = 0\n for i in vector:\n total += i**2\n mag = total ** .5\n return mag", "def magnitude(self):\n return math.sqrt(self.x**2 + self.y**2)", "def magnitude(self):\n return math.sqrt(self.x**2 + self.y**2)", "def get_magnitude_vector(given_vector):\n\n # Note: given vector is a dictionary of the form:\n # {term : score}\n\n magnitude = 0\n for item in given_vector.values():\n magnitude += item ** 2\n\n return magnitude ** 0.5" ]
[ "0.77811116", "0.759065", "0.7551002", "0.7550866", "0.7518799", "0.7386942", "0.7368965", "0.7349469", "0.73449564", "0.7306271", "0.72413987", "0.71244526", "0.7120417", "0.7073096", "0.70674366", "0.7043916", "0.70030254", "0.687828", "0.67796177", "0.67767006", "0.6765149", "0.66924536", "0.6629383", "0.6587228", "0.6579455", "0.65174407", "0.6458361", "0.643945", "0.643945", "0.6361468" ]
0.8485649
0
Returns a slice of y with a specified width
def windowcut(y, i, j, dur=1, sr=16000, discard_short=True): if dur < len(y)/sr: left = round((i+j)/2)-round(dur*sr/2) right = left+round(dur*sr) if left < 0: left = 0 right = round(dur*sr) elif right > len(y): right = len(y) left = right - round(dur*sr) return y[int(left):int(right)] else: # discard data if total length is smaller than duration we want if discard_short: return None else: # padd with zeros at the end return np.array(np.append(y, np.zeros(dur*sr-len(y))))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def crop_scope(self, x, yin_start, scope_shift):\n return torch.stack(\n [\n x[\n i,\n yin_start\n + scope_shift[i] : yin_start\n + self.yin_scope\n + scope_shift[i],\n :,\n ]\n for i in range(x.shape[0])\n ],\n dim=0,\n )", "def _get_slice(series, start, length):\n return [ int(s) for s in series[start:start+length] ]", "def cutout(yx, half_size, shape=None):\n\n if shape is None:\n shape = (inf, inf)\n if not np.iterable(half_size):\n half_size = (half_size, half_size)\n\n s = np.s_[max(yx[0] - half_size[0], 0):\n min(yx[0] + half_size[0] + 1, shape[0]),\n max(yx[1] - half_size[1], 0):\n min(yx[1] + half_size[1] + 1, shape[1])]\n return s", "def _slice_at_axis(sl, axis):\n return (slice(None),) * axis + (sl,) + (...,)", "def perimRect(length, width):\n return 2 * (length + width)", "def __getslice__(self,i,j):\n return self.x[i:j]", "def carve_slice(\n self, x_index=0, width=config()[\"panel\"][\"width\"],\n ):\n piece = []\n for row in self.grid:\n piece.append(row[x_index : x_index + width])\n\n return piece", "def slice(data, size):\n\treturn dice(data, size).T", "def crop(img: 'np.ndarray', x: int, y: int, width: int, height: int) -> 'np.ndarray':\n return img[y:y+height, x:x+width]", "def slice_data(xdata, ydata, x_range):\n\tdata = zip(xdata, ydata)\n\tsliced_data = [d for d in data if d[0] >= x_range[0] and d[0] <= x_range[1]]\n\treturn array(zip(*sliced_data))", "def slices(self, X, y=None):\n self.logger.debug('Slicing X with shape {}'.format(X.shape))\n\n n_samples = X.shape[0]\n sample_shape = X[0].shape\n window_shape = [\n max(1, int(s * self.stride_ratio)) if i < 2 else s\n for i, s in enumerate(sample_shape)\n ]\n\n #\n # Generates all the windows slices for X.\n # For each axis generates an array showing how the window moves on that axis.\n #\n slices = [\n [slice(i, i + window_axis) for i in range(sample_axis - window_axis + 1)]\n for sample_axis, window_axis in zip(sample_shape, window_shape)\n ]\n total_windows = np.prod([len(s) for s in slices])\n\n self.logger.info('Window shape: {} Total windows: {}'.format(window_shape, total_windows))\n\n #\n # For each window slices, return the same slice for all the samples in X.\n # For example, if for the first window we have the slices [slice(0, 10), slice(0, 10)],\n # this generates the following slice on X:\n # X[:, 0:10, 0:10] == X[(slice(None, slice(0, 10), slice(0, 10))]\n #\n # Since this generates on each iteration a window for all the samples, we insert the new\n # windows so that for each sample the windows are consecutive. 
This is done with the\n # ordering_range magic variable.\n #\n windows_slices_list = None\n ordering_range = np.arange(n_samples) + 1\n\n for i, axis_slices in enumerate(itertools.product(*slices)):\n if windows_slices_list is None:\n windows_slices_list = X[(slice(None),) + axis_slices]\n else:\n windows_slices_list = np.insert(\n windows_slices_list,\n ordering_range * i,\n X[(slice(None),) + axis_slices],\n axis=0,\n )\n\n #\n # Converts any sample with dimention higher or equal than 2 to just one dimention\n #\n windows_slices = \\\n windows_slices_list.reshape([windows_slices_list.shape[0], np.prod(window_shape)])\n\n #\n # If the y parameter is not None, returns the y value for each generated window\n #\n if y is not None:\n y = np.repeat(y, total_windows)\n\n return windows_slices, y", "def slices(self, X, y=None):\n self.logger.debug('Slicing X with shape {}'.format(X.shape))\n\n n_samples = X.shape[0]\n sample_shape = X[0].shape\n window_shape = [\n max(1, int(s * self.stride_ratio)) if i < 2 else s\n for i, s in enumerate(sample_shape)\n ]\n\n #\n # Generates all the windows slices for X.\n # For each axis generates an array showing how the window moves on that axis.\n #\n slices = [\n [slice(i, i + window_axis) for i in range(sample_axis - window_axis + 1)]\n for sample_axis, window_axis in zip(sample_shape, window_shape)\n ]\n total_windows = np.prod([len(s) for s in slices])\n\n self.logger.info('Window shape: {} Total windows: {}'.format(window_shape, total_windows))\n\n #\n # For each window slices, return the same slice for all the samples in X.\n # For example, if for the first window we have the slices [slice(0, 10), slice(0, 10)],\n # this generates the following slice on X:\n # X[:, 0:10, 0:10] == X[(slice(None, slice(0, 10), slice(0, 10))]\n #\n # Since this generates on each iteration a window for all the samples, we insert the new\n # windows so that for each sample the windows are consecutive. 
This is done with the\n # ordering_range magic variable.\n #\n windows_slices_list = None\n ordering_range = np.arange(n_samples) + 1\n\n for i, axis_slices in enumerate(itertools.product(*slices)):\n if windows_slices_list is None:\n windows_slices_list = X[(slice(None),) + axis_slices]\n else:\n windows_slices_list = np.insert(\n windows_slices_list,\n ordering_range * i,\n X[(slice(None),) + axis_slices],\n axis=0,\n )\n\n #\n # Converts any sample with dimention higher or equal than 2 to just one dimention\n #\n windows_slices = \\\n windows_slices_list.reshape([windows_slices_list.shape[0], np.prod(window_shape)])\n\n #\n # If the y parameter is not None, returns the y value for each generated window\n #\n if y is not None:\n y = np.repeat(y, total_windows)\n\n return windows_slices, y", "def MakeDifficulties(center, width, n):\n low, high = center-width, center+width\n return numpy.linspace(low, high, n)", "def slice_data_to_2D(x, y):\n if(x.shape != y.shape):\n print(\"Error: Images and Labels do not have the same shape\")\n else:\n x = np.array([(x[i, :, :, z]) for i in range(x.shape[0]) for z in range(x.shape[3])])\n y = np.array([(y[i, :, :, z]) for i in range(y.shape[0]) for z in range(y.shape[3])])\n return x,y", "def offset_y(self, X, y):\n X, y = self.check_consistent_params(X, y)\n if len(y.shape) == 1:\n offset = len(y) - X.shape[0]\n return y[offset:]\n else:\n offset = len(y[0]) - X.shape[0]\n return y[0, offset:]", "def getDonut(width=2, size=(25, 25, 25)):\n x, y, z = size\n assert width < z / 2\n\n # This is a single planr slice of ring\n ringPlane = getRing(0.25, 0.5, size=(x, y))\n\n # Stack up those slices starting form the center\n donutArray = np.zeros(size, dtype=np.uint8)\n zStart = z // 2\n for n in range(width):\n donutArray[zStart + n, :, :] = ringPlane\n\n return donutArray", "def __getslice__(self, i, j):\n return self.dtrs[i:j]", "def slice(tensor):\n out = tensor[:, 444:524, :]\n return out", "def crop_frame(frame):\n (h,w,c) = frame.shape\n return frame[int(h/2):h, 0:w]", "def _slice(tensor, size, i):\n return tensor[:, i * size : (i + 1) * size]", "def crop(dimension, start, end):\n def func(x):\n if dimension == 0:\n return x[start: end]\n if dimension == 1:\n return x[:, start: end]\n if dimension == 2:\n return x[:, :, start: end]\n if dimension == 3:\n return x[:, :, :, start: end]\n if dimension == 4:\n return x[:, :, :, :, start: end]\n return Lambda(func)", "def _fprop_slice_np(h, stride, H, roi_offset):\n hstart = int(np.floor(float(h) * stride))\n hend = int(np.ceil(float(h + 1) * stride))\n\n hstart = min(max(hstart + roi_offset, 0), H)\n hend = min(max(hend + roi_offset, 0), H)\n\n return slice(hstart, hend), hend - hstart", "def cut_dyadic(x):\n n = x.shape[0]\n j = math.floor(math.log2(n))\n m = 2**j\n return lax.dynamic_slice(x, (0,), (m,))", "def cut_384(img):\n if len(img.shape) > 2:\n ret = img[:, 50:434, 60:444]\n else:\n ret = img[50:434, 60:444]\n return ret", "def crop_image(image):\r\n return image[40:-20, :]", "def get_partial_data(x, keep=200):\n range_x = x.size(1)\n print(\"rangex\", range_x)\n\n range_p = range_x - keep - 50\n n = random.randint(25, range_p)\n return x[:, n:n + keep]", "def to_slice(self):\n return np.index_exp[self.start[2]:self.end[2], #\n self.start[1]:self.end[1], #\n self.start[0]:self.end[0]]", "def calc_line_slice(tik_instance, grads, y, grads_h, loc_h,\n loc_w, n_index, start_c1, end_c1, scale_w):\n in_w = grads.shape[3]\n in_slice_w, out_slice_w = calc_slice_size(scale_w)\n\n grads_ub = 
tik_instance.Tensor(\n \"float32\", [in_slice_w, 16], name=\"grads_ub\", scope=tik.scope_ubuf)\n y_ub = tik_instance.Tensor(\n \"float32\", [out_slice_w, 16], name=\"y_ub\", scope=tik.scope_ubuf)\n loc_reg = tik_instance.Scalar(dtype=\"int32\")\n index_reg = tik_instance.Scalar(dtype=\"int32\")\n start_out_w = tik_instance.Scalar(dtype=\"int32\")\n mov_out_w = tik_instance.Scalar(dtype=\"int32\")\n\n calc_c1_num = end_c1 - start_c1\n repeat_times = (in_w + in_slice_w - 1) // in_slice_w\n with tik_instance.for_range(0, calc_c1_num) as c1_index:\n\n with tik_instance.for_range(0, repeat_times) as w_index:\n start_out_w.set_as(loc_w[w_index * in_slice_w])\n cp_len = calc_segment(tik_instance, in_w, w_index, in_slice_w)\n # read one line grads\n tik_instance.tensor_mov(grads_ub,\n grads[n_index, start_c1 + c1_index,\n grads_h,\n w_index * in_slice_w, 0],\n '', 1, (cp_len * 16 * 4 + 31) // 32, 0, 0)\n # clear out ub\n clear_ub(tik_instance, y_ub)\n\n with tik_instance.for_range(0, cp_len) as i:\n index_reg.set_as((w_index * in_slice_w) + i)\n index_reg.set_as(loc_w[index_reg])\n loc_reg.set_as(index_reg - start_out_w)\n tik_instance.vadd(16, y_ub[loc_reg, 0], y_ub[loc_reg, 0],\n grads_ub[i, 0], 1, 1, 1, 1, 0, 0, 0)\n\n # move data out\n mov_out_w.set_as(loc_w[(w_index * in_slice_w) + cp_len - 1])\n mov_out_w.set_as(mov_out_w - start_out_w + 1)\n tik_instance.set_atomic_add(1)\n tik_instance.tensor_mov(y[n_index, start_c1 + c1_index,\n loc_h, start_out_w, 0],\n y_ub[0, 0], '', 1,\n (mov_out_w * 16 * 4 + 31) // 32, 0, 0)\n tik_instance.set_atomic_add(0)", "def _get_bounds(x, y, size):\n x = np.array(np.atleast_1d(x))\n y = np.array(np.atleast_1d(y))\n\n lower_x = np.rint(x - size[0]/2)\n lower_y = np.rint(y - size[1]/2)\n\n return np.stack((np.stack((lower_x, lower_x + size[0]), axis=1),\n np.stack((lower_y, lower_y + size[1]), axis=1)), axis=1).astype(int)", "def slice_timeseries(n_slices,dataset):\n\n n,l=np.shape(dataset)\n\n X = np.reshape(dataset,(n*n_slices,l//n_slices))\n\n print('sliced data shape (nr. of slices, slice length):',np.shape(X))\n print('#####################################')\n \n return X" ]
[ "0.5745769", "0.56596357", "0.56081295", "0.5587428", "0.5543565", "0.5526532", "0.5512229", "0.54833853", "0.54370314", "0.5419847", "0.53095645", "0.53095645", "0.5308653", "0.528652", "0.52655846", "0.5227977", "0.5204625", "0.5199836", "0.5189389", "0.5183875", "0.51705194", "0.51109445", "0.5104759", "0.50862575", "0.50756353", "0.50719213", "0.5070528", "0.5068132", "0.50657266", "0.5058614" ]
0.612583
0
The function collects the adjusted close prices of all given stocks in "tickers" and saves them in a CSV. It's useful for collecting time series
def download_stock_price_hist( tickers = [ 'AAPL' ], price_column = 'Adj Close', # assume it's the Adjusted Close price that are interested start = datetime.date( 2009, 12, 31 ), # assume start is guaranteed to be a weekday end = datetime.date( 2015, 12, 31 ), csv_file = "stock_price_test.csv", ): # Check validity of inputs if len( tickers ) <= 0: print "Tickers must not be empty"; return False; if start > end: print "Start date " + start.isoformat() + " can't be later than End date " + end.isoformat(); df = pd.DataFrame(); # data frame to return for _i in range( len(tickers) ): ticker = tickers[_i]; print "Index" + str(_i) + "\t" + "Ticker: " + ticker; start_str = start.isoformat(); end_str = end.isoformat(); hist = ystockquote.get_historical_prices( ticker, start_str, end_str ); # dictionary with date string as the key # Get time series of stock prices (Don't sort before forming the Series!!!) date_index = []; price_data = []; for key, val in hist.iteritems(): date_index.append( datetime.datetime.strptime( key, "%Y-%m-%d" ).date() ); price_data.append( float( val[ price_column ] ) ) if min( date_index ) > start: # Pass if the no stock price is available on Start continue; stock_ts = pd.Series( price_data, date_index ); stock_ts = stock_ts.sort_index(); # Add current stock TS to the DataFrame df[ticker] = stock_ts; df.to_csv( csv_file, index_label='Date' ); return True;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gather_stock_data(tickers, save=True):\n prices = pd.DataFrame()\n ts = TimeSeries(key='EY2QBMV6MD9FX9CP', output_format='pandas')\n\n for ticker in tickers:\n successful_grab = False\n ticker_daily_adj = None\n\n while successful_grab is not True:\n try:\n ticker_daily_adj = ts.get_daily_adjusted(ticker, outputsize='full')[0]\n successful_grab = True\n except ValueError:\n print('Waiting for API to let me in')\n time.sleep(10)\n\n ticker_daily_adj.loc[:, '0. ticker'] = ticker\n ticker_daily_adj = ticker_daily_adj[sorted(ticker_daily_adj.columns)]\n\n prices = pd.concat([prices, ticker_daily_adj])\n\n prices.sort_index(inplace=True)\n prices.reset_index(inplace=True)\n prices['date'] = pd.to_datetime(prices['date'])\n if save:\n prices.to_csv('stockdata.csv', index=True)\n\n return prices", "def prices(tickers):\n try:\n start = dt.datetime.today()\n start = start.strftime('%Y-%m-%d') \n data = pdr.get_data_yahoo(tickers, start=start)\n price = data['Adj Close']\n vol = data['Volume']\n data_dic = {}\n for stock in tickers:\n data_dic[str(stock)] = price[str(stock)][0], vol[str(stock)][0]\n \n df_data = pd.DataFrame(data_dic.values(), columns=['precio_usa', 'volumen_usa'])\n df_data['Ticker'] = tickers\n df_data = df_data.loc[:,['Ticker', 'precio_usa', 'volumen_usa']]\n\n except:\n start = dt.datetime.today()\n start = start - Day(3)\n start = start.strftime('%Y-%m-%d') \n data = pdr.get_data_yahoo(tickers, start=start)\n price = data['Adj Close']\n vol = data['Volume']\n data_dic = {}\n for stock in tickers:\n data_dic[str(stock)] = price[str(stock)][0], vol[str(stock)][0]\n \n df_data = pd.DataFrame(data_dic.values(), columns=['precio_usa', 'volumen_usa'])\n df_data['Ticker'] = tickers\n df_data = df_data.loc[:,['Ticker', 'precio_usa', 'volumen_usa']]\n\n return df_data", "def retrieve_data_for_all_sp500_tickers(store_in_csv=False):\n df_sp500_info = get_SP500_info()\n symbol_list = list(df_sp500_info['Symbol'])\n symbol_list.append(SP500_INDEX_TICKER)\n symbol_count = 0\n for symbol in symbol_list:\n try:\n df_symbol_data = get_data_for_ticker(symbol)\n if store_in_csv:\n logger.debug(f'writing csv for {symbol}')\n filename = get_filename_for_ticker(symbol)\n df_symbol_data.to_csv(filename)\n except:\n logger.error(f'error processing: {symbol}')\n symbol_count += 1\n\n return symbol_count", "def get_stock_price(df_excld):\n\n ts = TimeSeries(os.environ['ALPHA_VANTAGE_KEY'])\n\n info = []\n symbols = []\n counter = 0\n\n for t in df_excld['Ticker']:\n\n if counter % 5 == 0:\n time.sleep(65)\n\n i, m = ts.get_daily(symbol=t, outputsize='full')\n info.append(i)\n symbols.append(m['2. Symbol'])\n counter += 1\n\n return info, symbols", "def get_data(ticker, tickers):\n \n print(ticker)\n ## Date setting\n today = datetime.today()\n days_ago_90 = today - timedelta(days = 90)\n today = today.strftime(\"%Y-%m-%d\")\n days_ago_90 = days_ago_90.strftime(\"%Y-%m-%d\")\n \n df_ticker = web.DataReader(ticker, 'yahoo', start = days_ago_90, end = today)\n \n ## To get prices, iloc is used. 
It's because shifting by timedetlas will result in error in cases where some holidays occured \n price_most_recent = df_ticker.iloc[-1, 5]\n price_7_days_ago = df_ticker.iloc[-7, 5]\n price_21_days_ago = df_ticker.iloc[-21, 5]\n price_30_days_ago = df_ticker.iloc[-30, 5]\n price_90_days_ago = df_ticker.iloc[0,5]\n \n ## Getting price change\n price_change_7_days = price_change(price_most_recent, price_7_days_ago)\n price_change_21_days = price_change(price_most_recent, price_21_days_ago)\n price_change_30_days = price_change(price_most_recent, price_30_days_ago)\n price_change_90_days = price_change(price_most_recent, price_90_days_ago)\n \n ## Checking for constant price drop\n constant_price_drop_7 = constant_price_drop_detector(df_ticker, 7)\n ## Only if price drops constantly for 7 days it makes sense to check for this pattern in 21 days period\n if constant_price_drop_7 == \"YES\":\n constant_price_drop_21 = constant_price_drop_detector(df_ticker, 21)\n else:\n constant_price_drop_21 = \"NO\"\n \n ## Now creating the final df to return\n df_prices = df_ticker[['Adj Close']].T\n df_prices.index = [ticker]\n df_prices.reset_index(inplace = True)\n \n full_name = tickers.loc[tickers[\"Ticker\"] == ticker, 'Full Name'].values[0]\n df_prices['company_name'] = full_name\n df_prices['price_90_days_ago'] = price_90_days_ago\n df_prices['price_30_days_ago'] = price_30_days_ago\n df_prices['price_21_days_ago'] = price_21_days_ago\n df_prices['price_7_days_ago'] = price_7_days_ago\n df_prices['price_most_recent'] = price_most_recent\n \n df_prices['price_change_7_days'] = price_change_7_days\n df_prices['price_change_21_days'] = price_change_21_days\n df_prices['price_change_30_days'] = price_change_30_days\n df_prices['price_change_90_days'] = price_change_90_days\n \n df_prices['constant_price_drop_7'] = constant_price_drop_7\n df_prices['constant_price_drop_21'] = constant_price_drop_21\n \n df_prices.fillna(\"None\", inplace = True)\n \n return df_prices", "def update_binance_data(tickers_intervals):\n for ticker, interval in tickers_intervals:\n try:\n path_to_file = os.path.join(DIR_PATH, interval, f\"{ticker}.csv\")\n csvfile = pd.read_csv(path_to_file, index_col=False, delimiter=\",\")\n starting_date = datetime.utcfromtimestamp(\n csvfile.iloc[-1][\"Open time\"]\n ).strftime(\"%d %b, %Y\")\n print(\n \"Getting historical data for the ticker {} with {} interval starting from {}\".format(\n ticker, interval, starting_date\n )\n )\n\n candlesticks = client.get_historical_klines(\n ticker,\n interval,\n starting_date,\n datetime.now().strftime(\"%d %b, %Y\"),\n limit=1000,\n )\n\n format_date = lambda t: t / 1000\n format_price = lambda p: float(f\"{float(p):.2f}\")\n\n # overriding the last row.\n candlesticks[0][0] = format_date(candlesticks[0][0])\n candlesticks[0][1] = format_price(candlesticks[0][1])\n candlesticks[0][2] = format_price(candlesticks[0][2])\n candlesticks[0][3] = format_price(candlesticks[0][3])\n candlesticks[0][4] = format_price(candlesticks[0][4])\n csvfile.loc[len(csvfile) - 1] = candlesticks[0][:-1]\n for candlestick in candlesticks[1:]:\n candlestick[0] = format_date(candlestick[0])\n candlestick[1] = format_price(candlestick[1])\n candlestick[2] = format_price(candlestick[2])\n candlestick[3] = format_price(candlestick[3])\n candlestick[4] = format_price(candlestick[4])\n csvfile.loc[len(csvfile)] = candlestick[:-1]\n\n csvfile.to_csv(path_to_file, index=False)\n except Exception as e:\n print(e)", "def taq_quotes_trades_year_statistics_data(tickers, year):\n\n 
function_name = taq_quotes_trades_year_statistics_data.__name__\n\n # Create a file to save the info\n file = open('../taq_quotes_trades_year_statistics_data.csv', 'a+')\n file.write('Ticker, avg_quotes, avg_trades, avg_spread\\n')\n\n for ticker in tickers:\n\n taq_data_tools_statistics \\\n .taq_function_header_print_data(function_name, ticker, ticker,\n year, '', '')\n\n dates = taq_data_tools_statistics.taq_bussiness_days(year)\n\n stat = []\n args_prod = iprod([ticker], dates)\n\n # Parallel computation of the statistics. Every result is appended to\n # a list\n with mp.Pool(processes=mp.cpu_count()) as pool:\n stat.append(pool.starmap(taq_quotes_trades_day_statistics_data,\n args_prod))\n\n # To obtain the average of the year, I average all the results of the\n # corresponding values (number quotes, trades and avg spread)\n stat_year = np.nanmean(stat[0], axis=0)\n\n # Write data in file\n file.write(f'{ticker}, {stat_year[0]:.0f}, {stat_year[1]:.0f},'\n + f' {stat_year[2]:.2f}\\n')\n\n file.close\n\n return None", "def get_crypto_daily_price(cryptotickers = [], allData=False,limit = 90):\n api_key = os.getenv(\"CC_API\")\n ticker_list = cryptotickers\n crypto_df = pd.DataFrame()\n\n for ticker in ticker_list:\n #if allData is true, then it gets all the data available. If not, select data according to limit.\n if allData:\n url = f\"https://min-api.cryptocompare.com/data/v2/histoday?fsym={ticker}&tsym=USD&allData=true&api_key={api_key}\"\n else:\n url = f\"https://min-api.cryptocompare.com/data/v2/histoday?fsym={ticker}&tsym=USD&limit={limit}&api_key={api_key}\"\n \n raw_data = read_json(url)\n #print(json.dumps(raw_data, indent=5))\n df = pd.DataFrame(raw_data['Data']['Data'])\n df['time'] = pd.to_datetime(df['time'],unit='s')\n df.set_index(df['time'], inplace=True)\n df['close'] = df['close'].astype(float)\n crypto_df[ticker] = df['close']\n \n #\n new_columns = pd.MultiIndex.from_product([ crypto_df.columns, [\"close\"] ])\n crypto_df.columns = new_columns\n\n return crypto_df", "def download_all_stocks():\n stocks = get_stocklist()\n dfs = {}\n for i, r in stocks.iterrows():\n start = time.time()\n s = r['Ticker']\n stockfile = '../stockdata/' + s + '.csv.gz'\n print('downloading', s)\n stock = quandl.get('EOD/' + s)\n stock.to_csv(stockfile, compression='gzip')\n dfs[s] = stock\n print('took', time.time() - start, 's')\n\n return dfs", "def get_prices(start, end):\n\n tickers = TICKERS # fetch tickers from config.py\n df_final = pd.DataFrame() # declared for merging purposes (inside loops)\n\n for ticker in tickers: # Loop over tickers to fetch individual price series\n\n r = requests.get(\"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol=\" + ticker\n + \"&outputsize=full&apikey=\" + ALPHAVANTAGE_KEY)\n r_dict = r.json()\n\n dates = np.array([]) # this loop makes the index into an index of datetime objects. Note the format.\n for i in r_dict['Time Series (Daily)'].keys():\n datetime_obj = datetime.datetime.strptime(i, '%Y-%m-%d')\n dates = np.append(dates, datetime_obj)\n\n prices = np.array([]) # This loop extracts all prices and put them into an array\n for i in r_dict['Time Series (Daily)']:\n x = r_dict['Time Series (Daily)'][i]['5. adjusted close']\n prices = np.append(prices, x)\n\n open_prices = np.array([]) # grab opening prices as well\n for i in r_dict['Time Series (Daily)']:\n x = r_dict['Time Series (Daily)'][i]['1. 
open']\n open_prices = np.append(open_prices, x)\n\n df = pd.DataFrame({ # This dataframe contains each individual stock\n 'Date': dates,\n str(ticker + '_' + 'adjclose'): prices,\n str(ticker + '_' + 'open'): open_prices\n })\n df = df.set_index('Date')\n\n df_final = pd.DataFrame(data=df_final,\n index=dates) # these few lines are for merging the individual dataframes\n df_final.index.name = 'Date'\n df_final = df.merge(df_final, left_index=True, right_index=True)\n\n for ticker in tickers: # convert to numeric values. Prices are just \"objects\"\n df_final[str(ticker + '_' + 'adjclose')] = pd.to_numeric(df_final[str(ticker + '_' + 'adjclose')])\n df_final[str(ticker + '_' + 'open')] = pd.to_numeric(df_final[str(ticker + '_' + 'open')])\n\n df_final = df_final.iloc[::-1]\n\n return df_final[start: end] # slice the dataframe at the end, only return the specified date-range.", "def get_prices(tickers,\n start,\n end,\n types=None,\n data_source='tiingo',\n out_path=None,\n sort_tks=False,\n api_key=None):\n if isinstance(tickers, str):\n tickers = [tickers]\n if isinstance(types, str):\n types = [types]\n if (sort_tks):\n tickers = sorted(tickers)\n if data_source not in SOURCES:\n raise ValueError(\n 'data_source must be one of {SOURCES}.'\n )\n\n print(f'Downloading prices from {data_source.capitalize()}...')\n df = get_prices_from_source(tickers, start, end, data_source, types,\n api_key)\n\n if out_path is not None:\n try:\n df.to_csv(out_path)\n print(\"Results saved to: \", out_path)\n except (IOError, PermissionError):\n Warning(\"Failed to output to file!\")\n print(\"Download finished.\")\n return df", "def store_stock_data(stock_name = 'TSLA'):\n stonk = yf.Ticker(stock_name) # gets stock data from yahoo\n hist = stonk.history(period=\"max\") # historical stock prices\n hist.reset_index(inplace=True) # takes the date stamp out of the index column\n hist.rename(columns = {'Date':\"DateTime\"},inplace=True) # Changes the name of the date column\n hist['DateTime'] = pd.to_datetime(hist['DateTime'],utc=True) # Changes the timestamps to UTC\n hist.to_csv('../data/raw/'+stock_name+'_stock_price.csv')\n return", "def portfolio_download_data(tickers: List[str], dates: List[str],\n time_step: str) -> None:\n\n try:\n function_name: str = portfolio_download_data.__name__\n download_data_tools \\\n .function_header_print_data(function_name, tickers, dates,\n time_step)\n\n init_year = int(dates[0].split('-')[0])\n init_month = int(dates[0].split('-')[1])\n fin_year = int(dates[1].split('-')[0])\n fin_month = int(dates[1].split('-')[1])\n last_day = monthrange(fin_year, fin_month)[1]\n\n init_date: dt = dt(year=init_year, month=init_month, day=1)\n fin_date: dt = dt(year=fin_year, month=fin_month, day=last_day)\n\n # Not all the periods can be combined with the time steps.\n raw_data: pd.DataFrame = \\\n yf.download(tickers=tickers, start=init_date, end=fin_date,\n interval=time_step)['Adj Close']\n # Order DataFrame columns by sector\n raw_data = raw_data[tickers]\n\n if raw_data.isnull().values.any():\n # Remove stocks that do not have data from the initial date\n raw_data = raw_data.dropna(axis=1, thresh=len(raw_data) - 10) \\\n .fillna(method='ffill')\n\n download_data_tools.save_data(raw_data, dates, time_step)\n\n except AssertionError as error:\n print('No data')\n print(error)", "def final_series():\n tickers = pd.read_excel(os.path.abspath(os.path.dirname(__file__)) +\"./codigos.xlsx\", \n header=[0]).values.flatten()\n # tickers = pd.read_excel(\"./codigos.xlsx\", \n # 
header=[0]).values.flatten()\n ls = fetch_series(list(set(tickers)))\n net_series = [s for s in ls if _cleasing(s, [\"D\", \"M\"]) is not None]\n p = os.path.abspath(os.path.dirname(__file__))\n with open(p + \"/series_bcb\", \"wb\") as f:\n pickle.dump(net_series, f)\n # with open(\"./series_bcb\", \"wb\") as f:\n # pickle.dump(net_series, f) ", "def download_stock_data(symbol, interval, period):\n\tprices = yf.Ticker(symbol).history(period=period, interval=interval)\n\tif len(prices) > 0:\n\t\tresult_file = '../data/' + symbol + '.csv'\n\t\tprices.to_csv(result_file)\n\t\tprint(f\"Downloaded stock data of {stocks[s]}, data shape {prices.shape}, saved as {result_file}\")", "def get_data_from_yahoo():\n try:\n ticker = input('Enter the ticker symbol: ').upper()\n start = dt.datetime(2004, 8, 19)\n end = dt.datetime.today()\n\n df = web.DataReader(ticker, 'yahoo', start, end)\n df.to_csv('stock_data.csv')\n except Exception as e:\n print(e)\n exit()", "def taq_midpoint_year_statistics_data(tickers, year):\n\n function_name = taq_quotes_trades_year_statistics_data.__name__\n\n # Create a file to save the info\n file = open('../taq_midpoint_year_statistics_data.csv', 'a+')\n file.write('Ticker, Difference\\n')\n\n for ticker in tickers:\n\n taq_data_tools_statistics \\\n .taq_function_header_print_data(function_name, ticker, ticker,\n year, '', '')\n\n dates = taq_data_tools_statistics.taq_bussiness_days(year)\n\n stat = []\n args_prod = iprod([ticker], dates)\n\n # Parallel computation of the statistics. Every result is appended to\n # a list\n with mp.Pool(processes=mp.cpu_count()) as pool:\n stat.append(pool.starmap(taq_midpoint_day_statistics_data,\n args_prod))\n\n # To obtain the average of the year, I average all the results of the\n # corresponding value\n stat_year = np.nanmean(stat[0], axis=0)\n\n # Write data in file\n file.write(f'{ticker}, {stat_year}\\n')\n\n file.close\n\n return None", "def __get_all_data(self,tickr):\n self.__csvurl=f\"https://query1.finance.yahoo.com/v7/finance/download/{tickr}?period1=1092873600&period2={int(datetime.now().timestamp())}&interval=1d&events=history&includeAdjustedClose=true\"\n s=get_historic_data(self.__csvurl)\n\n \"\"\"you should not be able to access dataframe from outside the class\"\"\"\n df=pd.read_csv(io.StringIO(s.decode('utf-8')))\n df=df.dropna()\n df_columns=['Date','High','Low','Close','Adj Close']\n\n if not set(df_columns).issubset(df.columns):\n raise ValueError(f\"One or more columns are missing {df_columns}\")\n\n if len(df.index)<5:\n raise ValueError(f\"Cannot calculate EMA 5\")\n\n if len(df.index)<20:\n raise ValueError(f\"Cannot calculate SMA 20\")\n\n \"\"\"set date as index (required for filtering,sorting,grouping etc etc\"\"\"\n df['Date'] = pd.to_datetime(df['Date'], format = '%Y-%m-%d')\n\n df.set_index(['Date'], inplace=True)\n\n\n return df", "def get_tiingo_prices(tickers, start, end, api_key=None):\n\n all_results = []\n if api_key is None:\n api_key = os.getenv('TIINGO_API_KEY')\n # Sort tickers so that error logging can be used to identify progress\n tickers = sorted(tickers)\n\n for i, ticker in enumerate(tickers):\n try:\n df = web.DataReader(name=ticker,\n data_source='tiingo',\n start=start,\n end=end,\n api_key=api_key)\n df = df[['adjClose']]\n except KeyError as e:\n if e.args[0] == 'date':\n # Patch to handle issue in pandas_datareader\n # where empty results cause a KeyError\n print(f'Got empty df for i={i}, ticker={tickers[i]}')\n df = pd.DataFrame()\n except Exception as e:\n print('Received an 
unexpected error:', e)\n print(f'Only fetched up to {i-1} inclusive. Returning.')\n return pd.concat(all_results)\n\n if (i % 50 == 0) and i > 0:\n # Sleep to avoid timeouts. Empirically found 20s to be sufficient\n time.sleep(20)\n\n all_results.append(df)\n return pd.concat(all_results)", "def get_tickers():\n\turl = \"https://api.iextrading.com/1.0/ref-data/symbols\"\n\t\n\ttry:\n\t\tresponse = requests.get(url)\n\t\tif str(response.status_code) == \"200\":\n\t\t\tprint(\"[UPDATE]: Downlaoding Tickers from iextrading API\")\n\t\t\tjson_stock_data = response.json()\n\n\t\t\tpd_stock = pandas.DataFrame(json_stock_data)\n\t\t\t# DataFrame Format\n\t\t\t# date iexId isEnabled name symbol type\n\t\t\t# 0 2019-02-12 2 True Agilent Technologies Inc. A cs\n\n\t\t\tprint(\"[SUCCESS]: Downloaded {} symbols from IEX.\".format(len(pd_stock.index)))\n\n\t\t\treturn pd_stock\n\n\t\telse:\n\t\t\tprint(\"[ERROR]: Download from IEX failed.\")\n\t\t\treturn \"ERROR\"\n\texcept Exception as e:\n\t\tprint(\"[ERROR]: {}\".format(e))\n\t\treturn \"ERROR\"", "def request_binance_data(tickers_intervals, starting_date, ending_date):\n for ticker, interval in tickers_intervals:\n print(\n \"Getting historical data for the ticker {} with {} interval\".format(\n ticker, interval\n )\n )\n try:\n csvfile = open(\n os.path.join(DIR_PATH, interval, \"{}.csv\".format(ticker)),\n \"w\",\n newline=\"\",\n )\n candlestick_writer = csv.writer(csvfile, delimiter=\",\")\n\n candlesticks = client.get_historical_klines(\n ticker, interval, starting_date, ending_date\n )\n\n candlestick_writer.writerow(headers)\n\n for candlestick in candlesticks:\n candlestick[0] = candlestick[0] / 1000\n candlestick_writer.writerow(candlestick)\n\n csvfile.close()\n except Exception as e:\n print(e)", "def write_csv(self, stock_list):\n\n with open(self.outfile, 'w') as outfile:\n writer = csv.writer(outfile, delimiter=',',\n quoting=csv.QUOTE_MINIMAL)\n for symbol, values in stock_list.items():\n # Need to find a better way to handle this...\n writer.writerow([values['symbol'], values['name']])", "def fetch_series(tickers: List[str]) -> List[dict]:\n with requests.Session() as session:\n c = suds.client.Client(\n 'https://www3.bcb.gov.br/sgspub/JSP/sgsgeral/FachadaWSSGS.wsdl',\n transport=suds_requests.RequestsTransport(session))\n \n def _fetch(tck):\n try:\n resp = c.service.getUltimoValorVO(tck)\n if resp is not None:\n return _process_info(resp)\n except:\n tcks_off.append(tck)\n\n with executor() as e:\n ls = list(e.map(_fetch, tickers))\n return ls", "def fetch(tickers: List[str], limit:Optional[int]=None):\n srs = [tck.split(\".\")[1] for tck in tickers]\n resp = requests.get(URL, verify=False)\n if resp.ok:\n df = pd.read_csv(StringIO(resp.text), delimiter=\";\", decimal=\",\")\n else:\n logger.error(f\"Data from {resp.url} not availbe at the moment\")\n print(\"Data not available\")\n df[\"dates\"] = df.loc[:, [\"CO_ANO\", \"CO_MES\"]].apply(lambda x: dt(x[0], x[1], 1), axis=1)\n df[\"series\"] = df.loc[:, [\"TIPO\", \"TIPO_INDICE\"]].apply(lambda x: f\"{x[0]}_{x[1]}\", axis=1)\n dff = df.pivot(index=\"dates\", columns=\"series\", values=\"INDICE\").loc[:, srs]\n df_final = dff if limit is None else dff.tail(limit)\n for col in df_final:\n for ind in df_final.index:\n add_obs(f\"COMEX.{col}\", ind, df_final.loc[ind, col])", "def get_stock_prices(ticker_symbol, start_date, finnhub_client):\n end_date = pd.Timestamp(pd.Timestamp.today().date())\n end_unix = get_unix_time(end_date)\n start_unix = get_unix_time(start_date)\n\n # Pause 
shortly\n time.sleep(1)\n\n # Stock candles\n res = finnhub_client.stock_candles(ticker_symbol, 'D', start_unix, end_unix)\n if res[\"s\"] == \"no_data\":\n return pd.DataFrame()\n # Convert to Pandas Dataframe\n df_finnhub = pd.DataFrame(res)\n timestamp_index = df_finnhub[\"t\"].apply(lambda x: pd.Timestamp(pd.to_datetime(x, unit='s', origin='unix').date()))\n df_ticker = pd.DataFrame(df_finnhub[\"o\"].values, index=timestamp_index.values)\n return df_ticker", "def get_stock_prices(ticker, start_date, end_date=None):\n if end_date is None:\n end_date = dt.date.today()\n\n shares = Share(ticker)\n df = pd.DataFrame(shares.get_historical(start_date.isoformat(),\n end_date.isoformat()))\n return df.set_index(\"Date\", drop=True) \\\n .drop(\"Symbol\", axis=1) \\\n .astype(float) \\\n .sort_index()", "def write_results_to_file(stocks_to_write):\n date = datetime.date.today()\n date_str = str(date.year) + '-' + str(date.month) + '-' + str(date.day)\n file_name_core = 'results-' + date_str\n\n with open(\"results/txt/\" + file_name_core + \".txt\",\n 'w') as txt_results_file:\n\n for stock in stocks_to_write:\n txt_results_file.write(stock.make_one_line_report() + \"\\n\")\n\n with open(\"results/csv/\" + file_name_core + \".csv\",\n 'w') as csv_results_file:\n\n writer = csv.writer(csv_results_file)\n writer.writerow(StockData.get_csv_data_headings())\n for stock in stocks_to_write:\n writer.writerow(stock.get_csv_data_list())", "def compile_data():\r\n with open('sp500_tickers.pickle', 'rb') as file:\r\n tickers = pickle.load(file)\r\n metasp = pd.DataFrame()\r\n for count, ticker in enumerate(tickers):\r\n df = pd.read_csv('sp500_data\\{}.csv'.format(ticker))\r\n df.set_index('Date', inplace=True)\r\n df.rename(columns={'Adj Close': ticker}, inplace=True)\r\n df.drop(['Open', 'High', 'Low', 'Close', 'Volume'], 1, inplace=True)\r\n if metasp.empty:\r\n metasp = df\r\n else:\r\n metasp = metasp.join(df, how = 'outer')\r\n if count % 10 == 0:\r\n print(count)\r\n metasp.to_csv('sp500_meta.csv')", "def SaveToCSV(self):\n import csv \n csvfile = open(f\"Cache/{self.symbol}.csv\", \"w\", newline='')\n writer = csv.writer(csvfile, delimiter=',')\n writer.writerow([self.symbol, self.name, self.market])\n writer.writerow(['Latest P/E Ratio:', self.pe_ratio])\n writer.writerow(['Short Percent of Float:', self.short_percent_of_float])\n writer.writerow(['Date', 'Price', 'Dividend', 'Annualized Dividend'])\n for snapshot in self._history:\n writer.writerow([snapshot.date.strftime(\"%m/%d/%Y\"), snapshot.price, snapshot.dividend, snapshot.annualDividend])\n csvfile.close()\n print(f\"{self.name} saved to /Cache/{self.symbol}.csv\")", "def gethistory(ticker):\n link = 'http://ichart.finance.yahoo.com/table.csv?s=' + ticker\n response = urllib.urlopen(link)\n html = response.read()\n return readcsv(html)" ]
[ "0.7676052", "0.65778995", "0.6491287", "0.62602", "0.62568295", "0.6210967", "0.6161625", "0.6123165", "0.60736", "0.6038035", "0.6005312", "0.598648", "0.59670985", "0.5935379", "0.59336746", "0.5927037", "0.5919231", "0.5854586", "0.5815284", "0.5735999", "0.57307273", "0.570795", "0.570262", "0.5684336", "0.56821156", "0.5657788", "0.56465405", "0.5646469", "0.56377435", "0.56345236" ]
0.68193996
1
drop the db after each test
def tearDown(self): db.drop_all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tearDown(self):\n app.db.drop_database('local')\n app.db.close()", "def tearDown(self):\n with app.app_context():\n db = app.db.get_db()\n cur = db.cursor()\n with app.open_resource('sql/drop_tests.sql', mode='r') as f:\n cur.execute(f.read())\n db.commit()\n cur.close()\n db.close()", "def tearDown(self):\n self.db.drop_all()\n pass", "def teardown():\n teardown_db()", "def tearDown(self):\n #db.session.remove()\n db.drop_all()", "def tearDown(self):\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n get_connection().drop_database('test_monstor_registration')", "def tearDown(self):\n\t\twith self.app.app_context():\n\t\t\tdb.session.remove()\n\t\t\tdb.drop_all()", "def tearDown(self):\n\t\twith self.app.app_context():\n\t\t\tdb.session.remove()\n\t\t\tdb.drop_all()", "def tearDown(self) -> None:\n things.db.session.remove()\n things.db.drop_all()", "def tearDown(self):\n with self.app.app_context():\n db.session.remove()\n db.drop_all()", "def tearDown(self):\n db.session.commit()\n db.drop_all()" ]
[ "0.8770249", "0.87615293", "0.8755055", "0.87394303", "0.8652944", "0.8602954", "0.85897136", "0.85897136", "0.85897136", "0.85897136", "0.85897136", "0.85897136", "0.85897136", "0.85897136", "0.85897136", "0.85897136", "0.85897136", "0.85897136", "0.85897136", "0.85897136", "0.85897136", "0.85897136", "0.85897136", "0.85897136", "0.8586974", "0.85793304", "0.85793304", "0.8561795", "0.85093904", "0.8507319" ]
0.87856054
0
Ensure id is correct for the current/logged in user
def test_get_by_id(self): with self.client: self.client.post('/users/login', data=dict( username="eschoppik", password='secret' ), follow_redirects=True) self.assertTrue(current_user.id == 1) self.assertFalse(current_user.id == 20)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_id(self, id):", "def test_wrong_id(self):\n self.request.matchdict = {'user_id': int(self.request.user.id)+4}\n self.request.json_body = {}\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))", "def get_id(self): \n\t\treturn (self.user_id)", "def test_user_id_get(self):\n pass", "def id(self) -> int:\n return self.user.id", "def test_get_id(self):\r\n # the migration adds an initial admin user to the system\r\n user = UserMgr.get(user_id=1)\r\n self.assertEqual(\r\n user.id,\r\n 1,\r\n \"Should have a user id of 1: \" + str(user.id))\r\n self.assertEqual(\r\n user.username,\r\n 'admin',\r\n \"Should have a username of admin: \" + user.username)", "def validate_user_id(self, value):\n if not User.objects.filter(id=value).exists():\n raise serializers.ValidationError('User with this id does not exist.')\n return value", "def test_user_id(self):\n new_user = self.app\n self.assertTrue(new_user.user_id, 0)\n new_user.create_user()\n self.assertTrue(new_user.user_id, 1)\n for key in new_user.users:\n self.assertEqual(new_user.user_id, key)", "def get_accessible_user_id(self):\n ### DATABASE CODE GOES HERE\n return 1", "def get_id(self) -> int:\n return self.user_id", "def get_id(self):\n return self.user_id", "def test_user_id_put(self):\n pass", "def check_player_id(self):\n if self.player_id == 'me':\n profile = self.profile\n self.player_id = profile['id']", "def get_user_id(self):\n return self.id_user", "def same_user(user_id):\n return user_id == login_session['user_id']", "def user_id(self):\n return self.status.user[\"id\"]", "def get_current_user_id():\n user = get_current_user()\n return user.pk if user and user.is_authenticated else None", "def _get_user_id(self, user: Optional[Dict[str, Any]]) -> Optional[str]:\n return user[\"id\"] if user and \"id\" in user else None", "def get_id(self):\r\n return self.username", "def get_user_primary_key(self, request):\r\n try:\r\n return request.user.pk\r\n except AttributeError:\r\n return ''", "def get_user(id):\n pass", "def test_user_id_default(self):\n r = Review()\n self.assertEqual(\"\", r.user_id)", "def __int__(self):\r\n return self.userid", "def get_user_id():\n user_id = session.get(\"user_id\")\n return user_id if user_id else None", "def get_user_id(self, session, **kwargs):\n return None", "def userDocumentId(self, id: str) -> str:", "def update_user(id):\n pass", "def get_user_id(self):\n raise NotImplementedError", "def test_get_user_id(self):\n print('(' + self.test_get_user_id.__name__+')',\n self.test_get_user_id.__doc__)\n # for patient\n self.assertEqual(\n PATIENT_ID, self.connection.get_user_id(PATIENT_USERNAME))\n # for doctor\n self.assertEqual(\n DOCTOR_ID, self.connection.get_user_id(DOCTOR_USERNAME))", "def test_id_uniqueness(self):\n user_2 = User()\n self.assertNotEqual(self.user_1.id, user_2.id)" ]
[ "0.71248406", "0.69235414", "0.69196355", "0.6904955", "0.6854695", "0.68270594", "0.67593163", "0.67175955", "0.67118835", "0.6628256", "0.6587404", "0.6533205", "0.6528264", "0.647162", "0.6466161", "0.6448794", "0.6445844", "0.6387218", "0.6369543", "0.6369119", "0.63514787", "0.63328636", "0.6306904", "0.62996966", "0.6297102", "0.62964493", "0.6253731", "0.6227992", "0.6213079", "0.61909044" ]
0.70606107
1
Ensure that the login page loads correctly
def test_login_page_loads(self): response = self.client.get('/users/login') self.assertIn(b'Please login', response.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_login(self, resp: ResponseContextManager) -> None:\n is_login_page = '__appianCsrfToken' in resp.cookies\n if resp.ok and is_login_page:\n self.login()\n elif not resp.ok:\n # Check login page actually returns a csrf token\n login_page_resp = self.get_page('/suite/', label=\"Login.LoadUi\", check_login=False)\n if login_page_resp.ok and '__appianCsrfToken' in login_page_resp.cookies:\n self.login()", "def i_am_in_the_login_page(browser):", "def handle_needs_login():\n flash(\"You must be logged in to access this page.\")\n return redirect(url_for('auth.login', next=request.path))", "def login():\n login_page = Login()\n login_page.login_main_page()", "def test_login_page(self):\n r = requests.get(self.url)\n self.assertEqual(r.status_code, 200)\n soup = BeautifulSoup(r.content)\n self.assertEqual(soup.findAll('legend')[0].contents[0], 'Sign In')", "def check_user_and_login(self) -> Response:\n pass", "def login():", "def login():", "def test_loginpage_view(self):\n response = self.client.get(url_for('login'))\n self.assertEqual(response.status_code, 200)", "def test_valid_login_form_but_failed_authentication(self):\n\n\n\t\tpass", "def should_be_login_url(self) -> None:\n assert \"login\" in self.browser.current_url, \"URL not login page\"", "def login(self):\n\n self.__login_if_required()", "def should_be_login_form(self) -> None:\n assert self.is_element_present(*LoginPageLocators.LOGIN_FORM), \"Login form is not presented\"", "def test_login_required(self):\n\n response = self.client.get(reverse(\"upload-form\"))\n\n # See if a temporary redirect happens...\n self.assertEqual(response.status_code, 302)\n\n # Slightly redundant, but could alert early about unexpected changes/issues.\n self.assertIsInstance(response, HttpResponseRedirect)\n self.assertTrue(response.has_header(\"Location\"))\n\n # Quick check, totally ignorant of the ?next= parameter.\n self.assertTrue(response.get(\"Location\").startswith(settings.LOGIN_URL))", "def login_menu(self):\n print(\"\\nPlease enter your email and password\")\n email = self.validate_email()\n password = self.validate_password()\n self.authenticate_user(email, password)", "def login(self):", "def test_home_page_redirect_when_loggin_attempt_successful(self):\n\t\tpass", "def login_form():\n # if request.method == \"GET\":\n return render_template('login.html')", "def view_login(self):\n with self.client.get(\"/login\", catch_response=True) as response:\n for r_hist in response.history:\n if r_hist.status_code > 200 and r_hist.status_code < 400:\n response.failure(\"Logged on: Got redirect to /home\")", "def test_unauthenticated(self):\n self.browser.open(\"http://nohost/plone/full_review_list\")\n self.assertTrue(\"Login Name\" in self.browser.contents)", "def validate_login():\n # Locating the button on the top navigation bar\n button_login = My.search_clickable_webelement(\n driver, By.XPATH, \"//*[@id='ypgBody']/div[1]/header/div/div/div/div/div[3]/ul/li[5]\")\n assert button_login\n button_login.click()\n\n # Validating that the pop up window is present\n window = My.search_presence_webelement(driver, By.XPATH, \"//*[@id='ypModal']/div/div\")\n assert window", "def test_show_login_page(self):\n with self.client as c:\n\n res = c.get(\"/login\")\n html = res.get_data(as_text=True)\n\n self.assertEqual(res.status_code, 200)\n self.assertIn(\"Don't have an account?\", html)", "def test_voluntary_login(self):\n # Going to the login form voluntarily:\n resp = self.app.get('/login', status=200)\n form = resp.form\n # Submitting the 
login form:\n form['login'] = 'manager'\n form['password'] = 'managepass'\n post_login = form.submit(status=302)\n # Being redirected to the home page:\n ok_(post_login.location.startswith('http://localhost/post_login'))\n home_page = post_login.follow(status=302)\n ok_('authtkt' in home_page.request.cookies,\n 'Session cookie was not defined: %s' % home_page.request.cookies)\n eq_(home_page.location, 'http://localhost/')", "def login(self):\n\t\treturn", "def require_login(self):\n\tif users.get_current_user():\n\t return True\n\telse:\n\t self.redirect(users.create_login_url(self.request.uri))\n\t return False", "def test_login_view(self):\n response = self.client.get(url_for('users.login'))\n self.assertEqual(response.status_code, 200)", "def test_professor_can_login_to_web_portal(professor):", "def test_login_view(self):\n response = self.client.get(url_for('login'))\n self.assertEqual(response.status_code, 200)", "def check_login_response(self, response):\n\t\tprint(response.body)\n\t\tif login_error[0] not in response.body.decode('utf-8'):\n\t\t self.log(\"Successfully logged in. Let's start crawling!\")\n\t\t # Now the crawling can begin..\n\t\t return self.initialized()\n\t\t print(\"logged in\")\n\t\telse:\n\t\t self.log(\"Bad times :(\")\n\t\t # Something went wrong, we couldn't log in, so nothing happens.", "def wait_for_the_login_page_to_appear(driver):\n # to make sure the UI is refresh for the login page\n assert wait_on_element(driver, 240, '//input[@data-placeholder=\"Username\"]')\n assert wait_on_element(driver, 240, '//p[text()=\"HA is enabled.\"]')" ]
[ "0.73760843", "0.71508247", "0.7103101", "0.70669746", "0.70501286", "0.7037683", "0.69945127", "0.69945127", "0.69325566", "0.693044", "0.6920101", "0.6901194", "0.6886291", "0.6876428", "0.6860329", "0.6851631", "0.6819138", "0.67941445", "0.6789541", "0.6769932", "0.67687786", "0.67631817", "0.6758542", "0.6699304", "0.6695541", "0.66830385", "0.6675987", "0.66707546", "0.66630924", "0.66467863" ]
0.74059683
0
Get the absolute position of a corner.
def absolute_position(corner: str): screen = ui.main_screen().rect if corner == Corner.TOP_LEFT: return (0, 0) elif corner == Corner.TOP_RIGHT: return (screen.width, 0) elif corner == Corner.BOTTOM_LEFT: return (0, screen.height) elif corner == Corner.BOTTOM_RIGHT: return (screen.width, screen.height) else: raise ValueError(f'Invalid corner: "{corner}"')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def relative_position(self) -> Tuple[int, int]:\n return self.position[0] - self.region.rect.x, self.position[1] - self.region.rect.y", "def relative_position(self) -> Tuple[int, int]:\n return self.position[0] - self.region.rect.x, self.position[1] - self.region.rect.y", "def get_absolute_pos(x, y, base):\n\n # give a small deadzone\n new_x = base[0] + (int(x / 2) if abs(x) > 2 else 0)\n new_y = base[1] - (int(y / 2) if abs(y) > 2 else 0)\n\n return (new_x, new_y)", "def get_position(self):\n return self._border.get_position()", "def get_pos(self):\n return self.rect.midtop", "def getAbsCoords( self, x=None, y=None ):\n\n if x is None:\n x = self.x\n if y is None:\n y = self.y\n\n p = self.parent\n\n absX = 0\n absY = 0\n\n while( p != None ):\n absX += p.x\n absY += p.y\n\n p = p.parent\n\n absX += x\n absY += y\n\n return absX, absY", "def top_coords_absolute(self):\n pass", "def topRightCorner(self):\n self._updateExtents()\n return (self._mMaxX,self._mMinY)", "def origin(self):\r\n\r\n return self.ox, self.oy, self.oz", "def get_pos(self) -> tuple:\n return self.rect.center", "def absolute_collide_bottomright(self):\n x,y = self.position\n rect = self.collide_rect\n return (x+rect.width/2, y)", "def getupperleft(self):\n return (self.rect.x, self.rect.y)", "def GetLoCorner(self):\n ...", "def corners(self):\n x0, y0, width, height = self._rect_bbox\n xc = x0, x0 + width, x0 + width, x0\n yc = y0, y0, y0 + height, y0 + height\n transform = self._get_rotation_transform()\n coords = transform.transform(np.array([xc, yc]).T).T\n return coords[0], coords[1]", "def absolute_to_relative(self, x, y):\n rel_x = (x - self.width / 2) / (self.width / 2)\n if rel_x > 1:\n rel_x = 1\n elif rel_x < -1:\n rel_x = -1\n\n rel_y = (self.height / 2 - y) / (self.height / 2)\n if rel_y > 1:\n rel_y = 1\n elif rel_y < -1:\n rel_y = -1\n\n return rel_x, rel_y", "def get_origin(self):\n return self.coord_cls(x=0, y=0, z=0, system=self)", "def get_origin(self):\n return self.coord_cls(x=0, y=0, system=self)", "def GetHiCorner(self):\n ...", "def topLeftCorner(self):\n self._updateExtents()\n return (self._mMinX,self._mMinY)", "def absolute_collide_topleft(self):\n x,y = self.position\n rect = self.collide_rect\n return (x-rect.width/2, y-rect.height)", "def find_center(self):\n return(Point(self.corner.x + self.width/2.0, self.corner.y + self.height/2.0))", "def bottomLeftCorner(self):\n self._updateExtents()\n return (self._mMinX,self._mMaxY)", "def coordinates_abs(self, source):\n if not hasattr(self, 'azimuth'):\n return self.center_abs(source)\n else:\n return (*self.center_abs(source), self.azimuth, self.elevation)", "def get_xy(self):\r\n return self.board.get_xy()", "def get_origin(self) -> Vec:\n size_min, size_max = self.get_bbox()\n origin = (size_min + size_max) / 2\n return origin", "def pos(self):\n return self.bbox().pos(self.offset)", "def get_corners(self):\n (x_coord, y_coord) = (self.x_coord[0], self.y_coord[0])\n corner0 = (x_coord, y_coord)\n corner1 = (x_coord + self.size, y_coord)\n corner2 = (x_coord + self.size, y_coord + self.size)\n corner3 = (x_coord, y_coord + self.size)\n return (corner0, corner1, corner2, corner3)", "def get_origin(self) -> Vec:\n if self.is_brush():\n bbox_min, bbox_max = self.get_bbox()\n return (bbox_min + bbox_max) / 2\n else:\n return Vec.from_str(self['origin'])", "def upright(self):\n return Coord([self.x + 1, self.y - 1])", "def top_left(self):\n return Point(self.left, self.top)" ]
[ "0.6634786", "0.6634786", "0.6623731", "0.6619794", "0.6616437", "0.6604719", "0.6572081", "0.6554048", "0.65352184", "0.6524717", "0.65233713", "0.6521669", "0.64607877", "0.645872", "0.6423324", "0.6406516", "0.6397813", "0.6383624", "0.63822013", "0.6361139", "0.63440365", "0.63438636", "0.62795883", "0.62792623", "0.6254663", "0.62366205", "0.6236538", "0.6163371", "0.6153466", "0.6145838" ]
0.7907764
0
Click a position, relative to a corner.
def corner_click(position: Corner) -> None: actions.self.corner_hover(position) actions.mouse_click()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def click(pos=(0, 0)):\n pyautogui.click(x=pos[0], y=pos[1])", "def click(xy, offset_xy=(0,0)):\n (x,y) = xy\n (offset_x, offset_y) = offset_xy\n x = x + offset_x\n y = y + offset_y\n move_to((x,y))\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)", "def click(self, position):\n w, h = self.window.size\n sx, sy = self.tictactoe.size\n rx, ry = position\n x, y = sx * rx // w, sy * ry // h\n if self.tictactoe.available((x, y)):\n self.choice = (x, y)", "def apply_click(self, pos):\n if self.drawmode & self.DRAW_CIRCLE:\n self.click_bits(pos)\n else:\n self.click_bit(pos)", "def fake_click(self, x, y):\n # Currently only restacks windows, and does not trigger bindings\n self.manager.c.eval(\n textwrap.dedent(\n f\"\"\"\n self.core.warp_pointer({x}, {y})\n self.core._focus_by_click()\n \"\"\"\n )\n )", "def game_click(coord):\n mouseclick(coord[0], coord[1])\n time.sleep(0.5)", "def click_by_location(self, elem, **kwargs):\n loc = elem.location\n size = elem.size\n screen_size = self.driver.get_window_size()\n if self.tablet:\n if kwargs['side'] == 'middle':\n x = loc['x'] + size['width'] / 2\n y = loc['y'] + size['height'] / 2\n\n elif kwargs['side'] == 'left':\n x = loc['x'] + size['width'] / 4\n y = loc['y'] + size['height'] / 2\n\n elif kwargs['side'] == 'right':\n x = loc['x'] + size['width'] - 50\n y = loc['y'] + 10\n else:\n x = loc['x'] + size['width'] / 2\n y = loc['y'] + size['height'] / 2\n\n elif self.phone:\n if kwargs['side'] == 'middle':\n x = loc['x'] + size['width'] / 2\n y = loc['y'] + size['height'] / 2\n\n elif kwargs['side'] == 'left':\n x = loc['x'] + size['width'] / 4\n y = loc['y'] + size['height'] / 2\n\n elif kwargs['side'] == 'right':\n x = screen_size['width'] - 40\n y = loc['y'] + 5\n else:\n x = loc['x'] + size['width'] / 2\n y = loc['y'] + size['height'] / 2\n\n # an array of tuples\n action = TouchAction(self.driver)\n action.tap(x=x, y=y).perform()", "def click(button='left', coords=(0, 0)):\n _perform_click_input(button=button, coords=coords)", "def click(point):\n m = PyMouse()\n m.move(*point)\n m.press(*point)\n m.release(*point)", "def click_on_hero():\n mouseclick(coords_hero_button[0], coords_hero_button[1])", "def click(self, event):\n if self.segs == []:\n startCircle = self.findInter(event.x, event.y)\n if startCircle:\n xa, ya, xb, yb = self.can.coords(startCircle)\n self.firstCoords = ((xa + xb)/2, (ya + yb)/2)\n if not self.helpShown:\n self.showHelp()", "def click(self, x, y):\n # adb click 0,0 will have a weird behavior\n if x <= 0 and y <= 0:\n return\n cmd = \"shell input tap {x} {y}\".format(x=x, y=y)\n self.android_device_driver.adb.exec_adb_cmd(cmd).wait()", "def wheel_click(coords=(0, 0)):\n _perform_click_input(button='middle', coords=coords)", "def corner_hover(position: Corner) -> None:\n corner = Corner.absolute_position(position.corner)\n x = corner[0] + position.x\n y = corner[1] + position.y\n actions.mouse_move(x, y)", "def on_click(self, x, y):\n self.menu_pointer.on_click(x, y)", "def on_click(self, x, y):\n mul_x, mul_y = self.multiplier\n off_x, off_y = self.offset\n x -= off_x\n x /= mul_x\n y -= off_y\n y /= mul_y\n for button in self.button_dict.values():\n button.check_click(x, y)", "def right_click(coords=(0, 0)):\n _perform_click_input(button='right', coords=coords)", "def move_mouse_to_and_click(self, selector, x=0, y=0):\n self.move_mouse_to(selector, x, y, return_action_chain=True).click().perform()", "def 
click_element_by_point(self,param,ignore_error_handle = False):\n message = {};\n step = 'click element by point x:' + str(param['x']) + ' y:' + str(param['y']);\n try:\n point_x = param['x'];\n point_y = param['y'];\n click_count = param['count'];\n touch_action = TouchAction(self.driver);\n touch_action.tap(x=point_x,y=point_y,count=click_count).release().perform();\n message = self.feedback.feedback_action_ok(step);\n except BaseException,e:\n print 'catch exception:'+ str(e);\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;", "def mouse_click(left_right, down_up, x, y):\n mouse_events = {\n \"leftdown\": 0x8002,\n \"leftup\": 0x8004,\n \"rightdown\": 0x8008,\n \"rightup\": 0x8010\n }\n ctypes.windll.user32.SetCursorPos(x, y)\n ctypes.windll.user32.mouse_event(mouse_events[left_right.lower() + down_up.lower()], int(x), int(y), 0, 0)", "def right_click(self):\n self.scroll_to()\n ActionChains(self.driver).context_click(self._element).perform()", "def click(self, x: float, y: float) -> bool:\n\n if self.x <= x <= self.x + self.width and self.y <= y < self.y + self.height:\n return True\n\n return False", "def mouse_click(self,event):\n global drag_sq\n# print \"click at {0} {1}\".format(event.x,event.y)\n# sq = (event.y // sq_size) * 8 + event.x // sq_size\n sq = self.coord_to_sq((event.x, event.y))\n if sq in self.piece_objs:\n drag_sq = sq\n self.canvas.tag_raise(self.piece_objs[sq])\n return", "def click_event(self, event, x, y, flags, param):\n del flags, param\n if self.calibrated:\n if event == cv2.EVENT_LBUTTONUP:\n # Left click\n self.event = {'event': 'Positioning', 'x': x, 'y': y}\n\n elif event == cv2.EVENT_RBUTTONUP:\n # Right click\n self.event = {'event': 'PatchClamp', 'x': x, 'y': y}\n pass", "def click_callback(self, event):\n # print(\"clicked at \", event.x+self.offset_x, event.y+self.offset_y)\n # x = string.ascii_lowercase[math.ceil((event.x + self.offset_x) / self.width) - 1]\n # y = (math.ceil((event.y + self.offset_y) / self.width) - 9) * -1\n self.clear_moves_on_canvas()\n\n x = math.ceil((event.x + self.offset_x) / self.width) - 1\n y = math.ceil((event.y + self.offset_y) / self.width) - 1\n\n if 0 <= x < 8 and 0 <= y < 8:\n board_value = self.game.board[x][y]\n if self.moving:\n # check if second click isn't on another piece\n if board_value != \"\" and board_value[0] == self.game.current_player_color:\n self.calculate_moves_for_moving_piece(x, y)\n else:\n self.move_piece(x, y) # method moves moving_piece\n self.moving = False\n else:\n self.calculate_moves_for_moving_piece(x, y) # method sets moving_piece", "def relative_mouse_click(left_right, down_up, x, y):\n try:\n winId = win32gui.GetForegroundWindow()\n (left, top, right, bottom) = win32gui.GetWindowRect(winId)\n mouse_events = {\n \"leftdown\": 0x02,\n \"leftup\": 0x04,\n \"rightdown\": 0x08,\n \"rightup\": 0x10\n }\n\n ctypes.windll.user32.SetCursorPos(x, y)\n ctypes.windll.user32.mouse_event(mouse_events[left_right.lower() + down_up.lower()], int(x) + left,\n int(y) + top, 0, 0)\n except Exception as e:\n raise Exception(\"relativeMouseClick: \" + str(e))", "def click(self, key, duration=0.5, relative_size=(None, None)):\n '''\n x, y = self.locate(key, relative_size)\n pyautogui.click(x, y, interval=duration)\n\n '''\n self._mouse_moveTo(key, relative_size)\n\n pyautogui.mouseDown()\n time.sleep(duration)\n pyautogui.mouseUp()\n time.sleep(0.25)", "def click(self, mouse_pos: Tuple[int, int]):\n self.clicked = self.img_rect and 
self.img_rect.collidepoint(\n mouse_pos) and not self.clicked\n return self.clicked", "def checkClick(self, pos):\r\n self.dice.checkClick(pos)\r\n self.turnchanger.checkClick(pos)\r\n for point in self.points:\r\n point.checkClick(pos)", "def do_click(self, str_arg):\n arg = validateString(str_arg)\n for tmp in range(REPEAT_TIMES_ON_ERROR):\n try:\n if arg.startswith('('):\n point = self.__getPointXY(arg)\n printLog(self.threadName + '[clicking point %s...]' % arg, logging.DEBUG)\n self.adbc.touch(point[0], point[1], \"DOWN_AND_UP\")\n else:\n if \"/\" not in arg:\n raise ValueError('bad argument of do_click().')\n # get the target view\n tv = self.__getView(arg)\n if tv:\n if DEBUG:\n printLog('Found view %s.' % arg, logging.DEBUG)\n printLog(self.threadName + 'tinyStr: %s' % tv.__tinyStr__(), logging.DEBUG)\n # printLog(self.threadName + 'position and size: {}'.format(tv.getPositionAndSize()),\n # logging.DEBUG)\n printLog(self.threadName + '[clicking id %s...]' % arg, logging.DEBUG)\n tv.touch()\n else:\n printLog('Target view %s not found.' % arg, logging.ERROR)\n self.resultFlag = False\n return\n except Exception, e:\n printLog(self.threadName + 'the %dst try failed due to %s, will retry.' % (tmp, e.message),\n logging.ERROR)\n # self.reconnect()\n time.sleep(1)\n continue\n # finally:\n # printLog(self.threadName + \"[status=%s]\" % self.resultFlag)\n printLog(self.threadName + 'CLICK FAILED: still can\\'t make the click. please check the test environment.',\n logging.CRITICAL)\n self.resultFlag = False" ]
[ "0.7769098", "0.7308915", "0.7297762", "0.7030748", "0.69894505", "0.6878038", "0.6859976", "0.6858614", "0.68559927", "0.6754344", "0.6660804", "0.6593836", "0.6580712", "0.65693444", "0.6496045", "0.64715344", "0.6298699", "0.6289323", "0.62853897", "0.62313634", "0.62164855", "0.621111", "0.61936", "0.6160396", "0.6139874", "0.613925", "0.6135863", "0.6130008", "0.6091415", "0.6073377" ]
0.8152278
0
Print the mouse position relative to each corner. Use to get hardcodable positions.
def print_mouse_positions() -> None: mouse_pos = ctrl.mouse_pos() print(f"Absolute mouse pos: {mouse_pos}") screen = ui.main_screen().rect print(f"Main screen: {screen}") for corner in [ Corner.TOP_LEFT, Corner.TOP_RIGHT, Corner.BOTTOM_LEFT, Corner.BOTTOM_RIGHT, ]: corner_pos = Corner.absolute_position(corner) relative = (mouse_pos[0] - corner_pos[0], mouse_pos[1] - corner_pos[1]) print(f"Position relative to {corner}: {relative}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def report_mouse_position(x_pos=0, y_pos=0):\n print('x-axis:', x_pos, ' Y-axis: ', y_pos, flush=True)", "def mousePositionRaw(self):", "def mousePositionRaw(self):", "def mousePosition(self):", "def print_mouse_event(self, event, what):\n print('%s - pos: %r, button: %s, delta: %r' %\n (what, event.pos, event.button, event.delta))", "def display(self):\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(\" \", end=\"\")\n for row in range(self.width):\n print(\"#\", end=\"\")\n print()", "def printXY(self):\n print zip(self.x, self.y)", "def print_my_coords(x = 0, y = 0):\n\tprint(\"Coords (%d;%d)\" % (x,y))", "def print_cords(self):\n print('startX :', self.startX, ' ,startY :', self.startY, ' ,endX :', self.endX, ' ,endY :', self.endY)", "def print_coordinates_change(self, coordinates):#Qrect\n self.coordinates1.setText(str(coordinates.top()*2))\n self.coordinates2.setText(str(coordinates.left()*2))\n self.coordinates3.setText(str(coordinates.bottom()*2))\n self.coordinates4.setText(str(coordinates.right()*2))", "def display_coordinates(self) -> None:\n\n print('Y coordinate: ', self.player.row_position + 1)\n print('X coordinate: ', self.player.column_position + 1)", "def display(self):\n for b in range(self.y):\n print()\n for i in range(self.height):\n print(\" \" * self.x + \"#\" * self.width)", "def display(self):\n prow = self.__width * '#'\n nstr = self.y * \"\\n\"\n for x in range(self.__height):\n nstr += self.x * \" \"\n nstr += prow\n if x == (self.__height - 1):\n break\n nstr += \"\\n\"\n print(nstr)", "def display(self):\n for i in range(self.__y):\n print()\n for i in range(self.__height):\n print(\" \" * self.__x + \"#\" * self.__width)", "def display(self):\n print('\\n' * (self.__y), end='')\n for point in range(self.__height):\n print(' ' * self.__x, end='')\n for point in range(self.__width - 1):\n # print(' ' * self.__x, end='')\n print('#', end='')\n print('#')", "def mousePos():\n data = display.Display().screen().root.query_pointer()._data\n return data[\"root_x\"], data[\"root_y\"]", "def pos(self):\n x = (self.ec._win._mouse_x -\n self.ec._win.width / 2.) / (self.ec._win.width / 2.)\n y = (self.ec._win._mouse_y -\n self.ec._win.height / 2.) 
/ (self.ec._win.height / 2.)\n return np.array([x, y])", "def normal_mouse_move(self, event):\n plot = self.component\n if plot is not None:\n if isinstance(plot, BaseXYPlot):\n ndx = plot.map_index((event.x, event.y), index_only = True)\n x = plot.index.get_data()[ndx]\n y = plot.value.get_data()[ndx]\n print self.format % (x,y)\n else:\n print \"dataprinter: don't know how to handle plots of type\",\n print plot.__class__.__name__\n return", "def display(self):\n mg_w = self.width\n mg_h = self.height\n str_to_prt = \"\\n\" * self.y + (\" \" * self.x + \"#\" * mg_w + '\\n') * mg_h\n print(str_to_prt[:-1])", "def display(self):\n width = self.width\n height = self.height\n x = self.x\n y = self.y\n for d_y in range(y):\n print()\n for h in range(height):\n if x != 0:\n print(\" \" * x, end=\"\")\n print(\"#\" * width)", "def print_marks(self):\n\t\tSYMBOLS = {CLOSED: \".\", FLAG: \"x\", BOOM: \"#\", CLEAR: \" \"}\n\t\tfor y in range(self.height):\n\t\t\tfor x in range(self.width):\n\t\t\t\tm = self.marks[x][y]\n\t\t\t\tprint(SYMBOLS.get(m, m), end=\"\")\n\t\t\tprint(\"\")", "def mouse_position(self):\r\n # TODO: add: Now deprecated in favor of pi3d.events\r\n if self.mouse:\r\n return self.mouse.position()\r\n elif self.tkwin:\r\n return self.tkwin.winfo_pointerxy()\r\n else:\r\n return -1, -1", "def get_mouse_position(self):\n raise NotImplementedError", "def dump(self):\r\n return map(int, (self.left, self.top, self.right, self.bottom))", "def my_print(self):\n length = self.__size\n\n if self.__size == 0:\n print(\"\")\n\n \"\"\"Print using position of y-axis.\"\"\"\n for i in range(self.__position[1]):\n print(\"\")\n for j in range(length):\n \"\"\"Print spaces and # in x-axis.\"\"\"\n print((\" \" * self.__position[0]) + (\"#\" * length))", "def display(self):\n for row0 in range(self.y):\n print()\n for row in range(self.height):\n for column0 in range(self.x):\n print(\" \", end=\"\")\n for column in range(self.width):\n print(\"#\", end=\"\")\n print()", "def display(self):\n\n print(\"\\n\" * self.__y, end='') # y offset\n\n for i in range(self.__height):\n print(\" \" * self.__x, end='') # x offset\n print(\"#\" * self.__width)", "def getMousePosition(self):\n return (self.mouseData.x, self.mouseData.y)", "def display(self):\n for _jumpline in range(self.y):\n print(end=\"\\n\")\n for _height in range(self.height):\n for _space in range(self.x):\n print(\" \", end=\"\")\n for _width in range(self.width):\n print(\"#\", end=\"\")\n print(end=\"\\n\")", "def display(self):\n row = (' ' * self.__x) + (Rectangle.print_symbol * self.__width) + '\\n'\n print(('\\n' * self.__y) + (row * self.__height), end=\"\")" ]
[ "0.7387262", "0.69946706", "0.69946706", "0.68717116", "0.67130166", "0.66660905", "0.66269225", "0.65681857", "0.65366256", "0.63911045", "0.60892487", "0.60873604", "0.60664666", "0.60659", "0.60434276", "0.6039815", "0.6035616", "0.6003942", "0.5998977", "0.5984918", "0.5967985", "0.59262615", "0.591569", "0.58742684", "0.5869447", "0.5868869", "0.5863464", "0.5847432", "0.58335716", "0.5827271" ]
0.81904465
0
Make an image of differences between the first and second clip using ImageMagick. Will raise an exception if more than 2 clips are passed to the constructor.
def magick_compare(self) -> None: # Make diff images if len(self.clips) > 2: Status.fail(f'{self.__class__.__name__}: "magick_compare" can only be used with two clips!', exception=ValueError) self.path_diff = self.path / 'diffs' try: subprocess.call(['magick', 'compare'], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) self.path_diff.mkdir(parents=True) except FileNotFoundError as file_not_found: Status.fail( f'{self.__class__.__name__}: "magick compare" was not found!', exception=FileNotFoundError, chain_err=file_not_found ) except FileExistsError as file_err: Status.fail( f'{self.__class__.__name__}: {self.path_diff.to_str()} already exists!', exception=FileExistsError, chain_err=file_err ) all_images = [sorted((self.path / name).glob('*.png')) for name in self.clips.keys()] images_a, images_b = all_images cmds = [ f'magick compare "{i1.to_str()}" "{i2.to_str()}" ' + f'"{self.path_diff.to_str()}/diff_' + f'{f}'.zfill(len("%i" % self.max_num)) + '.png"' for i1, i2, f in zip(images_a, images_b, self.frames) ] # Launch asynchronously the Magick commands Status.info('Diffing clips...') print() SubProcessAsync(cmds)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cli(fig1, fig2, out):\n click.echo('\\n' + '.' * 50)\n\n # open first image\n image1 = Image.open(fig1)\n\n # open second image\n image2 = Image.open(fig2)\n\n # retrieve the image dimensions.\n width, height = image1.size\n width2, height2 = image2.size\n\n if [width, height] != [width2, height2]:\n print(\"Image dimensions do not match! The Two inputs must have equal dimensions\")\n exit(1)\n else:\n print(\"Fig1 dimensions: \", image1.size)\n print(\"Fig2 dimensions: \", image2.size)\n # Create a new image object.\n merged = Image.new('RGB', image1.size)\n\n for i in range(0, width):\n for j in range(0, height):\n ima1 = list(image1.getpixel((i, j)))\n ima2 = list(image2.getpixel((i, j)))\n if ima1 == ima2:\n r, g, b, a = ima1\n elif [ima1[0], ima1[1], ima1[2]] == [0, 0, 0] and [ima2[0], ima2[1], ima2[2]] != [0, 0, 0]:\n r, g, b, a = ima2\n elif [ima1[0], ima1[1], ima1[2]] != [0, 0, 0] and [ima2[0], ima2[1], ima2[2]] == [0, 0, 0]:\n r, g, b, a = ima1\n elif [ima1[0], ima1[1], ima1[2]] != [0, 0, 0] and ima2 == [255, 255, 255, 255]:\n r, g, b, a = ima1\n elif [ima2[0], ima2[1], ima2[2]] != [0, 0, 0] and ima1 == [255, 255, 255, 255]:\n r, g, b, a = ima2\n else:\n # print ima1,ima2\n r = (ima1[0] + ima2[0]) // 2\n g = (ima1[1] + ima2[1]) // 2\n b = (ima1[2] + ima2[2]) // 2\n a = 255\n # print [r,g,b,a]\n\n merged.putpixel((i, j), (r, g, b, a))\n merged.save(out)\n click.echo('\\n' + '.' * 50)", "def make_clips(self):\n\n average_messege_count, streamer_messeges_data = self.__do_analysis()\n\n clipworthy_clips = []\n\n #add clipworthy clips\n for entry in streamer_messeges_data:\n if((entry['messeges_count']*entry['messeges_count']) > (average_messege_count*1.8)):\n clipworthy_clips.append(entry)\n\n #combine clips that are next to one another in time\n clip_number = 0\n while(True):\n #print('clip_number = ' + str(clip_number) +' , length of cliparr = ' + str(len(clipworthy_clips)))\n if(clip_number >= (len(clipworthy_clips))-1):\n #at end of clips\n break\n\n if (clipworthy_clips[clip_number]['end_time']==clipworthy_clips[clip_number+1]['start_time']):\n #duplicate clip detected\n #print('dublicate clip detected for clip ' + str(clip_number))\n clipworthy_clips[clip_number]['end_time']=clipworthy_clips[clip_number+1]['end_time']\n #print('cliparr length before ridding: ' + str(len(clipworthy_clips)))\n clipworthy_clips.remove(clipworthy_clips[clip_number+1])\n #print('cliparr length after ridding: ' + str(len(clipworthy_clips)))\n #print('')\n else:\n clip_number = clip_number + 1\n\n\n print('clipworthy clips will now be made')\n clipSlicer = ClipSlicer(clipworthy_clips)\n clipSlicer.make_clips()\n\n print(\"clipworthy clips for streamer \"+ self.streamer + \" have been made\")", "def calc_difference(ndvi_tile1, ndvi_tile2, output):\n \n #open dataset and get Affine transformation and bounding properties \n with rio.open(ndvi1) as src1:\n meta = src1.meta.copy()\n transform = src1.meta[\"transform\"]\n x = meta['width']\n y = meta['height']\n band1 = src1.read()\n \n #open dataset \n with rio.open(ndvi2) as src2:\n #read the band as ndarray with the same dimension of src1\n band2 = src2.read(out_shape=(src1.height, src1.width), \n resampling=rio.enums.Resampling.bilinear)\n #create destination for reprojection of src2\n dst_crs = {'init': 'EPSG:32632'}\n proj_band2 = np.empty(src1.shape, dtype=np.float32)\n #reproject the src2 to match src1\n warp.reproject(band2, destination=proj_band2, src_transform=src2.transform, src_crs=src2.crs, \n dst_transform=transform, dst_crs=dst_crs) \n 
\n #calculate difference between reprojected band2 and band1\n difference = np.subtract(proj_band2, band1)\n #create outfile\n outfile = output\n #write outfile with the properties and resolution of src1\n with rio.open(outfile, 'w', **meta) as dst:\n dst.write(difference, window=rio.windows.Window(col_off=0, row_off=0, width=x, height=y))\n\n return outfile", "def _crossing_over(self, img_ext_1, img_ext_2) -> ExtendedImage:\n # Copy first extended image\n new_member = img_ext_1.img.copy()\n height = img_ext_2.get_height()\n\n # Add the right half of the 2nd image to copy of the 1st image\n new_member[0:, (height // 2):, :3] = img_ext_2.img[0:, (height // 2):, :3]\n return ExtendedImage(new_member)", "def stack_compare(\n clips: List[vs.VideoNode],\n height: Optional[int] = None,\n identity: bool = False,\n max_vertical_stack: int = 2,\n interleave_only: bool = False,\n *,\n text_mode: bool = False,\n):\n the_string = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ123456789abcefghijklmnopqrstuvwxyz\"\n if len(clips) < 2:\n raise ValueError(\"stack_compare: please provide 2 or more clips.\")\n has_plugin_or_raise([\"sub\", \"text\"], True)\n use_text_mode = not hasattr(core, \"sub\") or text_mode\n\n def _fallback_str(num: int) -> str:\n try:\n return the_string[num]\n except IndexError:\n return f\"Extra{num + 1}\"\n\n def _generate_ident(clip_index: int, src_w: int, src_h: int) -> str:\n gen = r\"{\\an7\\b1\\bord5\\c&H00FFFF\\pos\"\n if use_text_mode:\n gen = \"\"\n else:\n gen += \"({w}, {h})\".format(w=25 * (src_w / 1920), h=25 * (src_h / 1080))\n gen += r\"\\fs\" + \"{0}\".format(60 * (src_h / 1080)) + r\"}\"\n gen += \"Clip {0}\".format(_fallback_str(clip_index))\n return gen\n\n # Check for luma only clip\n only_use_luma = False\n for clip in clips:\n if clip.format.num_planes == 1:\n only_use_luma = True\n break\n\n if interleave_only:\n if only_use_luma:\n clips = [get_y(clip) for clip in clips]\n\n # Set identity\n if identity:\n if use_text_mode:\n clips = [\n core.text.Text(\n clip,\n _generate_ident(i, clip.width, clip.height)\n )\n for i, clip in enumerate(clips)\n ]\n else:\n clips = [\n clip.sub.Subtitle(\n _generate_ident(\n idx,\n clip.width,\n clip.height,\n )\n )\n for idx, clip in enumerate(clips)\n ]\n return core.std.Interleave(clips, mismatch=True)\n\n def _calculate_needed_clip(max_vert: int, clip_total: int) -> int:\n multiples_of = list(range(max_vert, (clip_total + 1) * max_vert, max_vert))\n multiples_of_total = len(multiples_of)\n max_needed = 1\n for i in range(multiples_of_total):\n if i + 1 == multiples_of_total - 1:\n break\n if multiples_of[i] <= clip_total <= multiples_of[i + 1]:\n max_needed = multiples_of[i + 1]\n break\n return max_needed\n\n # Set YUV video to Y video if only_use_luma.\n if only_use_luma:\n clips = [get_y(clip) for clip in clips]\n\n if identity:\n if use_text_mode:\n clips = [\n core.text.Text(\n clip,\n _generate_ident(i, clip.width, clip.height)\n )\n for i, clip in enumerate(clips)\n ]\n else:\n clips = [\n clip.sub.Subtitle(\n _generate_ident(\n idx,\n clip.width,\n clip.height,\n )\n )\n for idx, clip in enumerate(clips)\n ]\n\n # Find needed clip for current `max_vertical_stack`.\n if len(clips) != max_vertical_stack:\n needed_clip = _calculate_needed_clip(max_vertical_stack, len(clips))\n f_clip = clips[0]\n for _ in range(needed_clip - len(clips)):\n bclip = core.std.BlankClip(f_clip)\n if not use_text_mode:\n bclip = bclip.sub.Subtitle(\n r\"{\\an5\\fs120\\b1\\pos(\"\n + \"{},{}\".format(f_clip.width / 2, f_clip.height / 2)\n + r\")}BlankClip 
Pad\\N(Ignore)\"\n )\n clips.append(bclip)\n\n # Split into chunks of `max_vertical_stack` and StackVertical it.\n # Input: [A, B, C, D, E, F, G, H]\n # Output: [[A, B], [C, D], [E, F], [G, H]]\n clips = [\n core.std.StackVertical(clips[i : i + max_vertical_stack])\n for i in range(0, len(clips), max_vertical_stack)\n ]\n final_clip = core.std.StackHorizontal(clips) if len(clips) > 1 else clips[0]\n if height:\n if height != final_clip.height:\n ar = final_clip.width / final_clip.height\n final_clip = final_clip.resize.Bicubic(\n get_w(height, ar),\n height,\n )\n return final_clip", "def make_diff(file_before, file_after, file_output_name):\n if os.path.exists(file_output_name):\n shutil.rmtree(file_output_name)\n os.mkdir(file_output_name)\n psd_diff = diff(file_before, file_after)\n diff_content = {}\n for attr in [\"header\", \"layer\"]:\n diff_content[attr] = getattr(psd_diff, attr)\n with open(os.path.join(file_output_name, \"diff.json\"), \"w\") as diff_file:\n json.dump(diff_content, diff_file, indent=4)\n saved_files = []\n for layer_id in psd_diff.layer.keys():\n if len(psd_diff.layer_image[layer_id]) > 1:\n output_image = os.path.join(file_output_name, layer_id)\n psd_diff.layer_image[layer_id][\"before\"].save(output_image + \".before.png\")\n psd_diff.layer_image[layer_id][\"after\"].save(output_image + \".after.png\")\n diff_image_before = Image.new(\"RGBA\", psd_diff.layer_image[layer_id][\"before\"].size)\n diff_image_before_data = diff_image_before.load()\n diff_image_after = Image.new(\"RGBA\", psd_diff.layer_image[layer_id][\"after\"].size)\n diff_image_after_data = diff_image_after.load()\n width, height = diff_image_before.size\n pixel_index = 1\n for y in xrange(height):\n for x in xrange(width):\n if str(pixel_index) in diff_content[\"layer\"][layer_id][\"pixel\"]:\n diff_image_before_data[x, y] = tuple(diff_content[\"layer\"][layer_id][\"pixel\"][str(pixel_index)][\"before\"])\n diff_image_after_data[x, y] = tuple(diff_content[\"layer\"][layer_id][\"pixel\"][str(pixel_index)][\"after\"])\n else:\n diff_image_before_data[x, y] = (0, 0, 0, 0)\n diff_image_after_data[x, y] = (0, 0, 0, 0)\n pixel_index += 1\n diff_image_before.save(output_image + \".before.diff.png\", \"PNG\")\n diff_image_after.save(output_image + \".after.diff.png\", \"PNG\")\n saved_files.append(output_image + \".before.png\")\n saved_files.append(output_image + \".before.diff.png\")\n saved_files.append(output_image + \".after.diff.png\")\n saved_files.append(output_image + \".after.png\")\n saved_files.append(file_output_name + \"/diff.json\")\n return saved_files", "def diffImages(imgA, imgB):\n bandsImgA = imgA.split()\n bandsImgB = imgB.split()\n\n absDiff = ImageMath.eval(\"convert(abs(a0-b0) + abs(a1-b1) + abs(a2-b2), 'L')\",\n a0 = bandsImgA[0], b0 = bandsImgB[0],\n a1 = bandsImgA[1], b1 = bandsImgB[1],\n a2 = bandsImgA[2], b2 = bandsImgB[2])\n bandsImgOut = [\n ImageMath.eval(\"convert(a + 2*diff, 'L')\", a = bandsImgA[0], diff = absDiff),\n ImageMath.eval(\"convert(a - diff, 'L')\", a = bandsImgA[1], diff = absDiff),\n ImageMath.eval(\"convert(a - diff, 'L')\", a = bandsImgA[2], diff = absDiff),\n ]\n\n return Image.merge('RGB', bandsImgOut)", "def composite(args):\n\n # load the input image\n logging.info('Loading input image %s' % (args.input))\n inputImage = load_image(args.input)\n\n # load the target image\n logging.info('Loading target image %s' % (args.target))\n targetImage = load_image(args.target)\n\n # load the mask image\n logging.info('Loading mask image %s' % (args.mask))\n 
maskImage = load_image(args.mask)\n\n # If None, set the source points or sets them to the whole input image\n if args.source == None:\n (height, width, _) = inputImage.shape\n args.source = [0.0, height, 0.0, 0.0, width, 0.0, width, height]\n\n # Loads the source points into a 4-by-2 array\n source_points = np.array(args.source).reshape(4, 2)\n\n # Loads the target points into a 4-by-2 array\n target_points = np.array(args.dst).reshape(4, 2)\n\n # Compute the composite image\n result = composite_image(inputImage, targetImage,\n source_points, target_points, maskImage)\n result=np.uint8(result)\n # save the result\n logging.info('Saving result to %s' % (args.output))\n imageio.imwrite(args.output, result)", "def interpretSubclipParameters(n1, n2, frameCount):\n\n if n1 < 0:\n n1 += frameCount\n if n2 < 0:\n n2 += frameCount\n if n1 > n2 or n1 < 0 or n1 >= frameCount or n2 < 1 or n2 > frameCount:\n raise ValueError(\"invalid subclip parameters: n1 = {}, n2 = {}, clip.frameCount = {}\".format(n1, n2, frameCount))\n\n return (n1, n2)", "def bakeClip(*args, blend: List[int, int]=None, clipIndex: Union[int, List[int]]=0,\n keepOriginals: bool=True, name: AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def union_crops(crop1, crop2):\n x11, y11, x21, y21 = crop1\n x12, y12, x22, y22 = crop2\n return min(x11, x12), min(y11, y12), max(x21, x22), max(y21, y22)", "def ffmpeg_subclip_video_file(filename, t1, t2):\n subprocess.call(['ffmpeg', '-i', filename, '-ss', str(t1), '-to', str(t2), '-c', 'copy', '-y', filename.split('.')[0] + '_subclip.mp4'])\n return", "def color_transfer(source, target, clip=True, preserve_paper=True):\n # convert the images from the RGB to L*ab* color space, being\n # sure to utilizing the floating point data type (note: OpenCV\n # expects floats to be 32-bit, so use that instead of 64-bit)\n source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype(\"float32\")\n target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype(\"float32\")\n\n # compute color statistics for the source and target images\n (lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = image_stats(source)\n (lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = image_stats(target)\n\n # subtract the means from the target image\n (l, a, b) = cv2.split(target)\n l -= lMeanTar\n a -= aMeanTar\n b -= bMeanTar\n\n if preserve_paper:\n # scale by the standard deviations using paper proposed factor\n l = (lStdTar / lStdSrc) * l\n a = (aStdTar / aStdSrc) * a\n b = (bStdTar / bStdSrc) * b\n else:\n # scale by the standard deviations using reciprocal of paper proposed factor\n l = (lStdSrc / lStdTar) * l\n a = (aStdSrc / aStdTar) * a\n b = (bStdSrc / bStdTar) * b\n\n # add in the source mean\n l += lMeanSrc\n a += aMeanSrc\n b += bMeanSrc\n\n # clip/scale the pixel intensities to [0, 255] if they fall\n # outside this range\n l = _scale_array(l, clip=clip)\n a = _scale_array(a, clip=clip)\n b = _scale_array(b, clip=clip)\n\n # merge the channels together and convert back to the RGB color\n # space, being sure to utilize the 8-bit unsigned integer data\n # type\n transfer = cv2.merge([l, a, b])\n transfer = cv2.cvtColor(transfer.astype(\"uint8\"), cv2.COLOR_LAB2BGR)\n\n # return the color transferred image\n return transfer", "def create_mask_wrapper(args, crop=None, scale=1, flip=False,\n reflective=False, warm=False, reduce_red=False,\n saturate=False):\n input_filename, output_filename = args[0], args[1]\n try:\n img, mask = get_mask(input_filename, crop, scale, flip, reflective,\n warm, reduce_red, saturate)\n 
except:\n return\n mask = np.expand_dims(mask, axis=2)\n mask = (255 * mask).astype(np.uint8)\n img = np.concatenate((img, mask), axis=2)\n cv2.imwrite(output_filename, img)", "def make_clips(self):\n print('starting to make clips!')\n #TODO parallelize this with multiprocessing\n clip_number = 1\n \n for requested_clip in self.requested_clips:\n streamer_output_file_location = output_file_location + requested_clip['source_clip'] + '/'\n streamer_clips_output_file_location = streamer_output_file_location + 'clips/'\n\n print('opening file ' + streamer_output_file_location+requested_clip['source_clip']+'.mkv')\n entire_stream_clip = VideoFileClip(streamer_output_file_location+requested_clip['source_clip']+'.mkv')\n print('requested time: ' + str(requested_clip['start_time'].total_seconds()))\n print('requested end time: ' + str(requested_clip['end_time'].total_seconds()))\n print('clip duration:'+ str(entire_stream_clip.duration))\n clip = None\n if(requested_clip['end_time'].total_seconds()>entire_stream_clip.duration):\n #longer time than clip specified, use end of clip as end time\n clip = entire_stream_clip.subclip(requested_clip['start_time'].total_seconds(),entire_stream_clip.duration)\n else:\n clip = entire_stream_clip.subclip(requested_clip['start_time'].total_seconds(),requested_clip['end_time'].total_seconds())\n \n if not os.path.exists(streamer_clips_output_file_location):\n print('No directory found for given streamer, making new dir...')\n os.makedirs(streamer_clips_output_file_location)\n print(\"now rendering clip \" + self.requested_clips[0]['source_clip']+str(clip_number)+'.mp4 out of ' + str(len(self.requested_clips)))\n clip.write_videofile(streamer_clips_output_file_location + str(clip_number)+'.mp4')\n clip_number = clip_number + 1", "def create_clipping(request):\n if request.method == 'POST':\n image = request.POST.get('image')\n\n # check if the blanked image should be saved to the backend\n if request.POST.get('save_clipping') in ['false', False]:\n save = False\n else:\n save = True\n\n selection = {\n 'id': int(request.POST.get('selection[id]')),\n 'x': int(round(float(request.POST.get('selection[x]')))),\n 'y': int(round(float(request.POST.get('selection[y]')))),\n 'width': int(round(float(request.POST.get('selection[width]')))),\n 'height': int(round(float(request.POST.get('selection[height]')))),\n 'full_width': int(round(float(request.POST.get('selection[full_width]')))),\n 'full_height': int(round(float(request.POST.get('selection[full_height]'))))\n }\n\n # get the image id, the model object and the selection from the model\n edit_url = request.POST.get('edit_url')\n image_id = int(edit_url.split(\"/\")[-2])\n image_object = CustomImage.objects.get(id=image_id)\n original_selection = image_object.selections\n\n reseized_image = image_object.resize_url\n\n if selection['id'] == -1: # no selection -> use the whole image\n cropped_image = reseized_image\n else: # a specific selecion is used -> get the cropped image and selecion attributes\n image_object = CroppedImage.objects.get(id=selection['id'])\n cropped_image = original_selection[unicode(request.POST.get('selection[id]'))][\"url\"]\n\n # calculate the new blanking mask\n mask = get_mask_from_image(image, selection, cropped_image, save, image_object)\n\n # stream the new mask to the output\n stream = BytesIO()\n flat_mask = []\n for line in mask:\n flat_mask.extend(line)\n np.savetxt(stream, flat_mask, fmt=\"%u\", delimiter=', ', newline=', ')\n stream.seek(0)\n return HttpResponse(stream.read())\n\n 
return HttpResponse(\n json.dumps({\"nothing to see\": \"this isn't happening\"}),\n content_type=\"application/json\"\n )", "def clip_by_r2(fname_r1, fname_r2, out_dir):\n\n log = collections.defaultdict(int)\n log.update({x:y for x,y in locals().items() if x!='log'})\n\n fastq_file_r1 = open(fname_r1, 'r')\n fastq_file_r2 = open(fname_r2, 'r')\n out_fname_r1 = f\"{out_dir}/{os.path.basename(fname_r1)}\"\n out_fname_r2 = f\"{out_dir}/{os.path.basename(fname_r2)}\"\n\n\n\n n = -1\n keep = []\n\n\n # Write a new, clipped, R2 file, discarding reads with short inserts.\n fh = open(out_fname_r2, 'w')\n while True:\n name1, seq1, qual1 = read_a_read(fastq_file_r1)\n\n name2, seq2, qual2 = read_a_read(fastq_file_r2)\n if (not name1) or (not name2):\n break\n\n n += 1\n to_search = rc(seq1[6:13])\n seq2 = seq2.split(to_search)[0]\n \n if len(seq2) <= 6:\n log['R2 <=6 nt after removing R1 hexamer, discarded'] += 1\n continue\n log['R2 kept'] += 1\n\n qual2 = qual2[:len(seq2)]\n keep.append(n)\n fh.write(f'{name2}{seq2}\\n+\\n{qual2}\\n')\n\n fh.close()\n fastq_file_r1.close()\n fastq_file_r2.close()\n\n log['Reads input to clip_by_r2'] = log['R2 <=6 nt after removing R1 hexamer, discarded'] + log['R2 kept']\n if log['Reads input to clip_by_r2']>0:\n log['% Kept after removing hexamer'] = 100 * log['R2 kept']/log['Reads input to clip_by_r2']\n else:\n log['% Kept after removing hexamer'] = 100\n\n # Write a new R1 file (not clipped), keeping only the inserts in the new R1 file.\n fastq_file_r1 = open(fname_r1, 'r')\n n = -1\n on_element = 0\n fh = open(out_fname_r1, 'w')\n while len(keep)>(on_element):\n n += 1\n if (n == keep[on_element]):\n name1, seq1, qual1 = read_a_read(fastq_file_r1)\n\n if not name1:\n break\n\n fh.write('{0}{1}\\n+\\n{2}\\n'.format(name1, seq1, qual1))\n on_element += 1\n continue\n\n elif(n<keep[on_element]):\n name1, seq1, qual1 = read_a_read(fastq_file_r1)\n continue\n\n else:\n break\n\n fh.close()\n fastq_file_r1.close()\n fastq_file_r2.close()\n\n #check_file_lengths(out_fname_r1, out_fname_r2)\n return log", "def animate_slices_multi(field='uu1', datadir1='data/', datadir2='data/', proc=-1, extension='xz',\n format='native', tmin=0., tmax=1.e38, wait=0.,\n amin=0., amax=1., transform='', oldfile=False,\n makemovie=False):\n\n import pylab as plt\n\n datadir1 = os.path.expanduser(datadir1)\n if proc < 0:\n filename1 = join(datadir1, 'slice_' + field + '.' + extension)\n else:\n filename1 = join(datadir1, 'proc' + str(proc),\n 'slice_' + field + '.' + extension)\n\n datadir2 = os.path.expanduser(datadir2)\n if proc < 0:\n filename2 = join(datadir2, 'slice_' + field + '.' + extension)\n else:\n filename2 = join(datadir2, 'proc' + str(proc),\n 'slice_' + field + '.' 
+ extension)\n\n # Read the global dimensions.\n dim = read_dim(datadir1, proc)\n if dim.precision == 'D':\n precision = 'd'\n else:\n precision = 'f'\n\n # Set up slice plane.\n if extension == 'xy' or extension == 'Xy':\n hsize = dim.nx\n vsize = dim.ny\n if extension == 'xz':\n hsize = dim.nx\n vsize = dim.nz\n if extension == 'yz':\n hsize = dim.ny\n vsize = dim.nz\n plane1 = np.zeros((vsize, hsize), dtype=precision)\n plane2 = np.zeros((vsize, hsize), dtype=precision)\n\n infile1 = npfile(filename1, endian=format)\n infile2 = npfile(filename2, endian=format)\n\n #ax = plt.axes()\n #ax.set_xlabel('')\n #ax.set_ylabel('')\n #ax.set_ylim\n #ax.get_xaxis().set_visible(False)\n #ax.get_yaxis().set_visible(False)\n\n fig, (ax1,ax2) = plt.subplots(1,2)\n #fig.suptitle('Re = 400', fontsize=20)\n image1 = ax1.imshow(plane1, vmin=amin, vmax=amax)\n image2 = ax2.imshow(plane2, vmin=amin, vmax=amax)\n ax1.set_xlabel('')\n ax1.set_ylabel('')\n ax1.get_xaxis().set_visible(False)\n ax1.get_yaxis().set_visible(False)\n ax2.set_xlabel('')\n ax2.set_ylabel('')\n ax2.get_xaxis().set_visible(False)\n ax2.get_yaxis().set_visible(False)\n\n # Get the figure manager for real-time image display.\n manager = plt.get_current_fig_manager()\n manager.show()\n\n ifirst = True\n islice = 0\n files = []\n\n while True:\n try:\n raw_data1 = infile1.fort_read(precision)\n raw_data2 = infile2.fort_read(precision)\n except ValueError:\n break\n except TypeError:\n break\n\n if oldfile:\n t = raw_data1[-1]\n plane1 = raw_data1[:-1].reshape(vsize, hsize)\n plane2 = raw_data2[:-1].reshape(vsize, hsize)\n else:\n t = raw_data1[-2]\n plane1 = raw_data1[:-2].reshape(vsize, hsize)\n plane2 = raw_data2[:-2].reshape(vsize, hsize)\n\n if transform:\n exec('plane = plane' + transform)\n\n if t > tmin and t < tmax:\n title = 't = %11.3e' % t\n #fig.set_title(title)\n image1.set_data(plane1)\n image2.set_data(plane2)\n manager.canvas.draw()\n\n if ifirst:\n #print \"----islice----------t---------min-------max-------delta\" # Python 2\n print(\"----islice----------t---------min-------max-------delta\")\n #print \"%10i %10.3e %10.3e %10.3e %10.3e\" \\ # Python 2\n #% (islice, t, plane.min(), plane.max(), plane.max() - plane.min()) # Python 2\n print(\"{0:10} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e}\".format(islice, t, plane1.min(), plane1.max(), plane1.max() - plane1.min()))\n\n if(makemovie):\n fname = '_tmp%03d.png' % islice\n fig.savefig(fname)\n files.append(fname)\n\n ifirst = False\n islice += 1\n\n sleep(wait)\n\n infile1.close()\n infile2.close()\n\n if(makemovie):\n print('Making movie animation.mpg - this make take a while')\n # SC: Not all systems use mencoder. 
Need to change this into ffmpeg.\n os.system(\"mencoder 'mf://_tmp*.png' -mf type=png:fps=24 -ovc lavc -lavcopts vcodec=wmv2 -oac copy -o animation.mpg\")\n os.system(\"rm _tmp*.png\")", "def sub(input_a, input_b):\n sub_comp = input_b.duplicate()\n\n ImageBufAlgo.sub(sub_comp, input_a, input_b)\n\n if sub_comp.has_error:\n print \"Error merging subtracting:\", sub_comp.geterror()\n\n return sub_comp", "def compare_images(image1, image2, method='diff', *, n_tiles=(8, 8)):\n if image1.shape != image2.shape:\n raise ValueError('Images must have the same shape.')\n\n img1 = img_as_float(image1)\n img2 = img_as_float(image2)\n\n if method == 'diff':\n comparison = np.abs(img2 - img1)\n elif method == 'blend':\n comparison = 0.5 * (img2 + img1)\n elif method == 'checkerboard':\n shapex, shapey = img1.shape\n mask = np.full((shapex, shapey), False)\n stepx = int(shapex / n_tiles[0])\n stepy = int(shapey / n_tiles[1])\n for i, j in product(range(n_tiles[0]), range(n_tiles[1])):\n if (i + j) % 2 == 0:\n mask[i * stepx:(i + 1)*stepx, j * stepy:(j + 1) * stepy] = True\n comparison = np.zeros_like(img1)\n comparison[mask] = img1[mask]\n comparison[~mask] = img2[~mask]\n else:\n raise ValueError('Wrong value for `method`. '\n 'Must be either \"diff\", \"blend\" or \"checkerboard\".')\n return comparison", "def ffmpeg_extract_subclip(filename, t1, t2, targetname=None):\n name, ext = os.path.splitext(filename)\n if not targetname:\n T1, T2 = [int(1000*t) for t in [t1, t2]]\n targetname = \"%sSUB%d_%d.%s\" % (name, T1, T2, ext)\n\n cmd = [get_setting(\"FFMPEG_BINARY\"),\"-y\",\n \"-ss\", \"%0.2f\"%t1,\n \"-i\", filename,\n \"-t\", \"%0.2f\"%(t2-t1),\n \"-vcodec\", \"copy\", \"-acodec\", \"copy\", targetname]\n\n subprocess_call(cmd)", "def AppendImages(im1, im2):\r\n im1cols, im1rows = im1.size\r\n im2cols, im2rows = im2.size\r\n im3 = Image.new('RGB', (im1cols+im2cols, max(im1rows,im2rows)))\r\n im3.paste(im1,(0,0))\r\n im3.paste(im2,(im1cols,0))\r\n return im3", "def squared_difference_image_filter(*args, **kwargs):\n import itk\n instance = itk.SquaredDifferenceImageFilter.New(*args, **kwargs)\n return instance.__internal_call__()", "def concatenate(video_clip_paths, output_path, method=\"compose\"):\n # create VideoFileClip object for each video file\n clips = [VideoFileClip(c) for c in video_clip_paths]\n if method == \"reduce\":\n # calculate minimum width & height across all clips\n min_height = min([c.h for c in clips])\n min_width = min([c.w for c in clips])\n # resize the videos to the minimum\n clips = [c.resize(newsize=(min_width, min_height)) for c in clips]\n # concatenate the final video\n final_clip = concatenate_videoclips(clips)\n elif method == \"compose\":\n # concatenate the final video with the compose method provided by moviepy\n final_clip = concatenate_videoclips(clips, method=\"compose\")\n # write the output video file\n final_clip.write_videofile(output_path)", "def ffmpeg_extract_subclip(filename, t1, t2, targetname=None):\n print('in ffmpeg_extract_subclip')#```````````````````````````````````````````````````````````````````\n name, ext = os.path.splitext(filename)\n if not targetname:\n T1, T2 = [int(1000*t) for t in [t1, t2]]\n targetname = \"%sSUB%d_%d.%s\" % (name, T1, T2, ext)\n\n cmd = [get_setting(\"FFMPEG_BINARY\"),\"-y\",\n \"-ss\", \"%0.2f\"%t1,\n \"-i\", filename,\n \"-t\", \"%0.2f\"%(t2-t1),\n \"-vcodec\", \"copy\", \"-acodec\", \"copy\", targetname]\n subprocess_call(cmd)", "def transform_images(img1,img2):", "def clips(md5, user_id=None):\n u = 
Upload.objects.filter(md5=md5).first()\n thing = Thing.objects.filter(files=u).first()\n if not u:\n abort(404)\n # load annotations\n if user_id:\n annotations = Reference.objects.filter(\n upload=u, creator=user_id).order_by('pos')\n else:\n annotations = Reference.objects.filter(upload=u).order_by('pos')\n clips = []\n\n for a in annotations:\n if a.pos_end:\n link = url_for(\"reference.figleaf\", md5=a.upload.md5,\n _anchor='%s-%s' % (a.pos, a.pos_end))\n y1, y2 = (a.pos, a.pos_end) if a.pos_end - \\\n a.pos < 1 else (int(a.pos), int(a.pos))\n if a.pos_end - a.pos > 1:\n a.note = '%s (%s pages)' % (\n a.note, int(a.pos_end) - int(a.pos) + 1)\n img = url_for(\"reference.preview\", filename=u.preview(\n filename='%s-%sx%s.jpg' % (y1, y2, 500)))\n clips.append((link, img, a.note, a.tags))\n\n return render_template('reference/clips.html',\n title=\"Clips from %s\" % thing.title,\n thing=thing,\n compiler=url_for('compiler.create',\n mode='from', value=md5),\n clips=clips\n )", "def crop (*args, **kwargs):\n return compute('crop', inputs=list(args), args=kwargs)", "def composite_scene(orig_scene, mask_seam, match_scene, dialation_mask, orig_scene1, method=\"paste\", repeat=1):\n avg_pixel = np.mean(orig_scene1[orig_scene1 != 0])\n \n output = np.zeros(orig_scene.shape)\n if method==\"seamlessclone\":\n width, height, _ = match_scene.shape\n center = (height/2, width/2)\n \n # create plain white mask\n mask = np.zeros(match_scene.shape, match_scene.dtype) + 255\n \n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = avg_pixel\n \n \n \n #image_to_compare\n output_blend = cv2.seamlessClone(match_scene.astype(np.uint8), \n orig_scene_impute.astype(np.uint8), \n mask, center,cv2.NORMAL_CLONE)\n \n #implot(output_blend)\n # now reapply the mask with alpha blending to fix it up again.\n \n \"\"\"\n TO DO CHANGE IT FROM THE DILATION + MASK SEAM, NEED TO FIND THE INTERSECTION OF THESE TWO TO BE THE \n REAL MASK TO BLUR\n \"\"\"\n dilation_mask = mask_seam.copy()\n \n dilation_mask = cv2.GaussianBlur(dilation_mask, (101,101), 0) # blur mask and do a alpha blend... 
between the \n #implot(dilation_mask, 'gray')\n \n dilation_mask = dilation_mask/255.0\n \n \n \n # 0 is black, 1 is white\n #output = cv2.addWeighted(output_blend, dialation_mask, orig_scene, 1-dialation_mask)\n #print dialation_mask\n #print dialation_mask.shape\n #print output_blend.shape\n #a = cv2.multiply(output_blend.astype(np.float), dialation_mask)\n \n for _ in range(10):\n # some kind of layered alpha blend by the dilation mask values...\n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]\n output_blend = cv2.add(cv2.multiply(output_blend.astype(np.float), dilation_mask),\n cv2.multiply(orig_scene_impute.astype(np.float), 1-dilation_mask), 0)\n \n \n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]\n output_blend = cv2.add(cv2.multiply(output_blend.astype(np.float), dilation_mask),\n cv2.multiply(orig_scene_impute.astype(np.float), 1-dilation_mask), 0)\n \n \n \n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]\n output = cv2.seamlessClone(match_scene.astype(np.uint8), \n output_blend.astype(np.uint8), \n mask, center,cv2.NORMAL_CLONE)\n \n # complete blend with seamlessclone...\n \n \n # output = np.maximum(output_blend, orig_scene_impute)\n # or just darken...\n \n \n #if repeat == 1:\n # return output_blend\n #output = composite_scene(orig_scene_impute, mask_seam, output_blend, dialation_mask, method=\"paste\")\n \n\n\n elif method==\"paste\":\n output[mask_seam == 0] = orig_scene[mask_seam == 0]\n output[mask_seam != 0] = match_scene[mask_seam != 0]\n \n elif method==\"alphablend\":\n output_blend = output.copy()\n output_blend[mask_seam == 0] = orig_scene[mask_seam == 0]\n output_blend[mask_seam != 0] = match_scene[mask_seam != 0]\n \n \n \n \n else:\n output[mask_seam == 0] = orig_scene[mask_seam == 0]\n output[mask_seam != 0] = match_scene[mask_seam != 0]\n return output", "def subtract(x, y):\n return Shape(x.wallTime, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk)" ]
[ "0.624791", "0.57677054", "0.5656094", "0.5443441", "0.5401006", "0.5328892", "0.52844095", "0.52791625", "0.52341545", "0.5206452", "0.51734555", "0.51449054", "0.511724", "0.51007843", "0.50973564", "0.5077471", "0.5071751", "0.5052234", "0.5048019", "0.5021675", "0.4992363", "0.4979152", "0.49513298", "0.49451557", "0.49357137", "0.49274746", "0.48934782", "0.4889491", "0.4889153", "0.4850868" ]
0.62435144
1
Upload to slow.pics with given configuration
def upload_to_slowpics(self, config: SlowPicsConf = default_conf) -> None: # Upload to slow.pics all_images = [sorted((self.path / name).glob('*.png')) for name in self.clips.keys()] if self.path_diff: all_images.append(sorted(self.path_diff.glob('*.png'))) # type: ignore fields: Dict[str, Any] = {} for i, (name, images) in enumerate( zip(list(self.clips.keys()) + (['diff'] if self.path_diff else []), all_images) ): for j, (image, frame) in enumerate(zip(images, self.frames)): fields[f'comparisons[{j}].name'] = str(frame) fields[f'comparisons[{j}].images[{i}].name'] = name fields[f'comparisons[{j}].images[{i}].file'] = (image.name, image.read_bytes(), 'image/png') with Session() as sess: sess.get('https://slow.pics/api/comparison') # TODO: yeet this files = MultipartEncoder(config | fields) Status.info('Uploading images...') print() url = sess.post( 'https://slow.pics/api/comparison', data=files.to_string(), headers=_get_slowpics_header(str(files.len), files.content_type, sess) ) slowpics_url = f'https://slow.pics/c/{url.text}' Status.info(f'Slowpics url: {slowpics_url}') url_file = self.path / 'slow.pics.url' url_file.write_text(f'[InternetShortcut]\nURL={slowpics_url}', encoding='utf-8') Status.info(f'url file copied to "{url_file.resolve().to_str()}"')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_snaphot(self, url, filename):\n\n req_for_image = requests.get(url)\n IMAGE_FILE = BytesIO(req_for_image.content)\n\n img = Image.open(IMAGE_FILE)\n # here, we create an empty string buffer\n buffer = BytesIO()\n img.save(buffer, \"JPEG\", quality=60)\n buffer.seek(0)\n print(\"compressed\")\n\n # Do the actual upload\n file_attr = self.sftp.putfo(\n buffer, self.remote_snaphot_path + filename, confirm=False\n )\n\n return file_attr", "def upload_to(instance, filename):\n return upload_image_path(filename, 'products')", "def main(upload, config):\n upload.obj = uploader.Uploader(config=config)", "def pub_upload(args, project=\"\", base_url=\"\", api_key=\"\"):\n project, base_url, api_key, updated = get_project_config(\n project=project, base_url=base_url, api_key=api_key)\n if updated:\n save_config()\n upload_theme(args, base_url, api_key, prefix=project)", "def put_upload(self):\n # print \"starting upload...\", self.current_upload['filepath']\n self.touch()\n self.log(\"STARTING_UPLOAD\", level=INFO)\n try:\n Backend.put_file(self.fileobj, self.current_upload[\"gcs_url\"])\n except exceptions.FilePutError as err:\n self.handle_put_error(err, self.fileobj)\n raise", "def file_upload():\n\n click.secho('*** Uploading image...', fg='green')\n uploaded = _uploaded_file('cover.jpg')\n click.secho(json.dumps(uploaded, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Creating a Picture document for it...', fg='green')\n picture = _make_document('picture', title='cover image', sys_filename=uploaded['path'])\n click.secho(json.dumps(picture, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Attaching it to a Blueray as cover...', fg='green')\n slp = _make_document('movie', title='Silver Linings Playbook')\n blueray = _make_document('blueray', movie_id=slp['_id'], cover_id=picture['_id'])\n click.secho(json.dumps(blueray, indent=2, sort_keys=True), fg='yellow')", "def upload_file(self, file_path, file_name, output_path):", "def __upload(self, filename):\n # Save to local path\n save_img = self.__frame.copy()\n\n # Initialize the bucket for after usage\n image_blob = None\n\n # Make the Google Cloud Storage client\n # and set the storage path\n if self.__yaml[\"bucket\"] is not None:\n client = storage.Client()\n bucket = client.get_bucket(self.__yaml[\"bucket\"])\n image_blob = bucket.blob(filename)\n\n # Upload and save the image\n try:\n if self.__yaml[\"output_path\"] is not None:\n # Save image in local\n LOGGER.info(f\"Saved {filename} in local folder\", )\n path = os.path.sep.join((self.__yaml[\"output_path\"], filename))\n cv2.imwrite(path, save_img)\n\n # Upload to Google Cloud Storage\n # if the user set the \"bucket\" option\n if self.__yaml[\"bucket\"] is not None:\n image_blob.upload_from_filename(os.path.sep.join((self.__yaml[\"output_path\"],\n filename)),\n content_type=\"image/jpeg\")\n\n LOGGER.info(f\"Saved {filename} to google cloud storage\")\n elif self.__yaml[\"bucket\"] is not None:\n # Convert numpy array to bytes\n temp_file = Image.fromarray(cv2.cvtColor(save_img, cv2.COLOR_BGR2RGB))\n temp_file_bytes = io.BytesIO()\n temp_file.save(temp_file_bytes,\n format=\"JPEG\")\n\n # Read the bytes from beginning\n temp_file_bytes.seek(0)\n image_blob.upload_from_file(temp_file_bytes,\n content_type=\"image/jpeg\")\n\n LOGGER.info(f\"Saved {filename} to google cloud storage\")\n except Exception as error:\n # If errors occur, just print the error messages\n # and don't exit the program\n LOGGER.warning(error)", "def put_object(local_path: str, 
file_name: str, configuration):\n pass", "def imgupload(self, accountid=None, upfile=None, cameraid=None, timestamp=None, duration=None, params=None):\n self.uploaddir = os.path.join(self.staticdir, 'uploads')\n print(\"UploadFile: Name: %s, Type: %s \" % (upfile.filename, upfile.content_type))\n fext = str(upfile.content_type).split('/')[1]\n print(\"Extension: %s\" % (fext))\n logging.info('Recieved request: {}/{}'.format(cameraid, timestamp))\n if not os.path.exists(self.uploaddir):\n logging.info('Upload directory does not exist, creating %s' % (self.uploaddir))\n os.makedirs(self.uploaddir)\n\n if upfile is not None:\n tsx = self.epoch()\n ofile = os.path.join(self.uploaddir, \"%s.%s\" % (tsx, fext))\n print(\"Local filename: %s\" % (ofile))\n ofilex = open(ofile, \"wb\")\n shutil.copyfileobj(upfile.file, ofilex)\n logging.info(\"Copied uploaded file as %s\" % (ofilex))\n ofilex.close()\n wwwbase = os.path.basename(self.staticdir)\n out = {'upimg': \"%s.%s\" % (tsx, fext) }\n infodb = self.dbase['infodb']\n\n info = {'upimg': \"{}.{}\".format(tsx, fext),\n 'epoch': tsx,\n 'accountid': accountid,\n 'timestamp': int(timestamp),\n 'duration' : int(duration),\n 'cameraid': cameraid,\n 'params': params}\n\n # Finally insert received object into db\n res = infodb.insert_one(info)\n print(res)\n return json.dumps(out)\n\n else:\n return \"Parameter: \\\"theFile\\\" was not defined\"", "def upload(self, filename, file_path):\n return", "def upload_file(name):\n subprocess.check_output(cmd_preamble + [\"cp\", name, f\"jot://{name}\"])", "def upload_image (auth_url, one_username, one_password, f, server_ip, server_username, server_password, image_dir, ssh_port=22, image_type = \"OS\"):\n import os\n\n try:\n ssh_scp_files(server_ip, server_username, server_password, f, image_dir, ssh_port)\n #ssh_transfer_files(server_ip, server_username, server_password, f, image_dir, ssh_port)\n\n # sife of the file in bytes\n size = os.path.getsize(f)\n # convert to MB\n size = int(size/(1024*1024))\n\n # Resgister the image\n conn = pyone.OneServer(\n auth_url,\n session=\"{0}:{1}\".format(one_username, one_password)\n )\n name, file_extension = os.path.splitext(f)\n description = f\n source = image_dir + f\n \n # find the default datastore\n dsid = 0\n datastores = conn.datastorepool.info()\n for ds in datastores.DATASTORE:\n if ds.NAME == \"default\":\n dsid = ds.ID\n break\n\n # creation of the image template and registration\n #template='''\\nNAME=\"%s\"\\nPATH=\"%s\"\\nTYPE=\"%s\"\\nDESCRIPTION=\"%s\"\\nSIZE=\"%d\"''' % \\\n template='''\\nNAME=\"%s\"\\nPATH=\"%s\"\\nTYPE=\"%s\"\\nDRIVER=\"qcow2\"\\nDESCRIPTION=\"%s\"\\nSIZE=\"%d\"''' % \\\n (name, source, image_type, description, size*3)\n logger.debug(\"template: {}\".format(template))\n logger.debug(\"DSID: {}\".format(dsid))\n r = conn.image.allocate(template,dsid)\n except Exception as e:\n logger.exception(\"Failed uploading image: {}\".format(str(e)))\n delete_remote_file(server_ip, server_username, server_password, str(image_dir + f), ssh_port)\n return \"Failed uploading image: {}\".format(str(e)), 400\n delete_remote_file(server_ip, server_username, server_password, str(image_dir + f), ssh_port)\n return \"Image uploaded successfully\", 201", "def project_uploader():\n if not current_app.config['S3_KEY']:\n return ''\n if len(request.files) == 0:\n return 'No files selected'\n img = request.files['file']\n if not img or img.filename == '':\n return 'No filename'\n ext = img.filename.split('.')[-1].lower()\n if ext not in ACCEPTED_TYPES:\n 
return 'Invalid format (allowed: %s)' % ','.join(ACCEPTED_TYPES)\n # generate a simpler filename\n keepcharacters = ('.', '_')\n safe_filename = img.filename.replace(' ', '_')\n safe_filename = \"\".join(\n c for c in safe_filename\n if c.isalnum() or c in keepcharacters).rstrip()\n if not safe_filename:\n safe_filename = \"\".join(random_password(8), '.', ext)\n # use random subfolder inside user id folder\n filename = '/'.join([\n str(current_user.id),\n random_password(24),\n safe_filename\n ])\n # with tempfile.TemporaryDirectory() as tmpdir:\n # img.save(path.join(tmpdir, filename))\n if 'S3_FOLDER' in current_app.config:\n s3_filepath = '/'.join([current_app.config['S3_FOLDER'], filename])\n else:\n s3_filepath = filename\n # print('Uploading to %s' % s3_filepath)\n if 'S3_ENDPOINT' in current_app.config:\n s3_obj = boto3.client(\n service_name='s3',\n endpoint_url=current_app.config['S3_ENDPOINT'],\n aws_access_key_id=current_app.config['S3_KEY'],\n aws_secret_access_key=current_app.config['S3_SECRET'],\n )\n #print('Uploading to endpoint %s' % current_app.config['S3_ENDPOINT'])\n else:\n s3_obj = boto3.client(\n service_name='s3',\n region_name=current_app.config['S3_REGION'],\n aws_access_key_id=current_app.config['S3_KEY'],\n aws_secret_access_key=current_app.config['S3_SECRET'],\n )\n #print('Uploading to region %s' % current_app.config['S3_REGION'])\n # Commence upload\n s3_obj.upload_fileobj(img,\n current_app.config['S3_BUCKET'],\n s3_filepath,\n ExtraArgs={'ContentType': img.content_type,\n 'ACL': 'public-read'}\n )\n return escape('/'.join([current_app.config['S3_HTTPS'], s3_filepath]))", "def upload_single(self, filepath, test=False):\n return self._gphotocli_image_tasks.upload_single(filepath, test)", "def imageUpload(query,callnum):\n\n source_file_name = \"data/imgsrc/{0}-{1}.png\".format(callnum,query)\n destination_blob_name = \"{0}-{1}.png\".format(callnum,query)\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(os.environ.get('CLOUD_STORAGE_BUCKET'))\n blob = bucket.blob(destination_blob_name)\n blob.upload_from_filename(source_file_name)\n\n\n # Make the blob publicly viewable.\n blob.make_public()\n image_public_url = blob.public_url\n print('Image {0}: {1} uploaded to {2}.'.format(callnum,\n source_file_name,\n destination_blob_name))\n\n return destination_blob_name\n #print(json.dumps(data))\n \"\"\"\n #writing to file, this erases the file if it already existed\n fin = open('data/{0}-{1}.json'.format(callnum,query),'w+')\n fin.close()\n\n fin = open('data/{0}-{1}.json'.format(callnum,query),'w+')\n fin.write(response)\n end = time.time()\n fin.close()\n \"\"\"", "def upload(ctx: click.Context, **kwargs):\n root_commands.cmd_upload(ctx.obj, **kwargs)", "async def capture_and_upload_screenshot(self) -> None:", "def upload_file():\r\n # Define an image object\r\n image_path = r'F:\\Testing_Development\\Projects\\Interface_requests\\Interface_requests\\upload_files\\Napoleon Bonaparte.jpg'\r\n file = {'file': open('Napoleon Bonaparte.jpg', 'rb')}\r\n # response = requests.post(base_url + '/post', files=file, timeout=3)\r\n response = requests.post(base_url + '/post', files=file)\r\n print(response.status_code)\r\n print(response.text)", "def test_procedure_picture_upload(self):\n image_upload_url = PROCEDURE_URL\n\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n\n payload = {\n 'name': 'temp',\n 'speciality': [self.speciality.pk],\n 'image': ntf,\n 
'overview': 'bla bla bla'\n }\n\n res = self.client.post(\n image_upload_url,\n payload,\n format=\"multipart\"\n )\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n self.assertIn('image', res.data)", "def upload():\n global local_filename\n global task_id\n head_group_id = current_app.config['HEAD_GROUP_ID']\n if request.method == 'POST':\n user_id = request.form['user_id']\n followed_group_id = request.form['group_id']\n if not followed_group_id:\n return jsonify({'code': 400, 'msg': 'group_id is required.'})\n elif not user_id:\n return jsonify({'code': 400, 'msg': 'user_id is required.'})\n\n if 'photo' not in request.files:\n return jsonify({'code': 400, 'msg': 'photo is required.'})\n head_pic = request.files['photo']\n\n try:\n # 1、校验组是否存在\n group = get_group(head_group_id)\n if not group:\n return jsonify({'code': 400, 'msg': 'can not found group by id [{0}]'.format(head_group_id)})\n photos = UploadSet('photos', IMAGES)\n configure_uploads(current_app, photos)\n filename = photos.save(head_pic)\n local_filename = photos.path(filename)\n oss_dir = group[1]\n oss_filename = '{0}{1}'.format(group[1], filename)\n if not oss_dir.endswith(\"/\"):\n oss_filename = '{0}/{1}'.format(group[1], filename)\n\n # 2、获得图片的base64编码\n photo_base64 = base64.b64encode(open(local_filename, 'rb').read())\n encode_str = str(photo_base64, 'utf-8')\n\n # 3、通过第三方接口获得图片的特征值\n ret = r.post(current_app.config['GET_FEATURE_URL'], data=json.dumps({'image': encode_str}),\n headers={'content-type': 'application/json'})\n response = json.loads(ret.text)\n\n if 'body' in response:\n body = response['body']\n # 4、上传图片到oss\n oss_url = upload_file(oss_filename, local_filename)\n # 5、保存photo数据到db\n head_pic = get_image_by_muiti_condition(filename, oss_filename, int(head_group_id))\n global head_pic_id\n if not head_pic:\n head_pic_id = add_image(filename, oss_filename, int(head_group_id), json.dumps(body))\n else:\n head_pic_id = head_pic[0]\n update_image(filename, oss_filename, int(head_group_id), json.dumps(body), head_pic_id)\n # 6、保存用户、头像图片和关注组的关系\n face = get_face_by_user_id_and_grou_id(user_id, followed_group_id)\n if not face:\n add_face(user_id, followed_group_id, head_pic_id)\n else:\n update_face(user_id, followed_group_id, head_pic_id, face[0])\n # 7、添加一个相似图片查找任务,使用用户头像去所关注的组中找相似的图片,结果缓存到结果表\n task_id += 1\n Thread(target=find_similar_task,\n args=(current_app._get_current_object(), \"face_%s\" % task_id, followed_group_id, head_pic_id,)\n ).start()\n\n # 8、返回头像保存结果\n return jsonify(\n {'code': 200, 'image_id': head_pic_id, 'url': oss_url,\n 'url_express': current_app.config['OSS_URL_EXPIRES'],\n 'msg': 'modify head image success.'})\n else:\n return jsonify(response)\n except Exception as e:\n traceback.print_exc()\n return jsonify({'code': 500, 'error': '{0}'.format(e)})\n finally:\n # 6、删除临时图片\n try:\n if os.path.isfile(local_filename):\n os.remove(local_filename)\n except FileNotFoundError:\n print(\"delete not exits file\")\n except Exception:\n traceback.print_exc()\n else:\n return jsonify({'code': 400, 'msg': 'upload image failed.'})", "def upload_progress(self, cloud_file, size, uploaded):", "def upload_preset(self, filename, title, description, version, author, REQUEST=None):\r\n\r\n # TODO presets.py - upload_preset - specify how to authenticate\r\n\r\n raise NotImplementedError", "def put( filename, file_type = 'auto', history_id = None ):\n conf = _get_conf()\n gi = get_galaxy_connection()\n tc = ToolClient( gi )\n history_id = history_id or _get_history_id()\n 
tc.upload_file(filename, history_id, file_type = file_type)", "def upload(self, upload_request):\n raise NotImplementedError", "def upload():\n run('mkdir -p /srv/images/'+env.project_name+'/')\n rsync_project(\n env.project_dir, './',\n exclude=(\n '.git', '.gitignore', '__pycache__', '*.pyc', '.DS_Store', 'environment.yml',\n 'fabfile.py', 'Makefile', '.idea', 'bower_components', 'node_modules',\n '.env.example', 'README.md', 'var'\n ), delete=True)", "def upload(self, img):\n # create a connection to sendspace\n (upl_url, upl_max_size, upl_id, upl_extra_info) = self.connect()\n # return the (download_url, delete_url) of the image\n return self.uploadImage(upl_url, upl_max_size,\n upl_id, upl_extra_info, img)", "def upload_files(self, context, instance_ref, bless_files):\n raise Exception(\"Uploading files to the image service is not supported.\")", "def start_upload(self, group_name=None):\n rmt = BossRemote(cfg_file_or_dict=self.args.config)\n\n type_to_dtype = {'image': 'uint16', 'annotation': 'uint64'}\n\n img = tf.imread(os.path.expanduser(self.args.tif_stack))\n if self.args.type == 'annotation' and img.dtype != 'uint64':\n img = np.asarray(img, dtype='uint64')\n\n coll_name = self.args.collection\n exp_name = self.args.experiment\n chan_name = self.args.channel\n source_chan = []\n\n if self.args.source_channel != None:\n source_chan = [self.args.source_channel]\n\n # upload image back to boss\n channel_rsc = self._get_channel_resource(rmt, chan_name, coll_name, exp_name, type=self.args.type, sources=source_chan, datatype=type_to_dtype[self.args.type], new_channel=self.args.new_channel)\n\n if img.dtype != 'uint64' or img.dtype != 'uint16':\n if self.args.type == 'image':\n img = img.astype('uint16')\n else:\n img = img.astype('uint64')\n\n if not self.args.chunk:\n self._upload_to_boss(rmt, img, channel_rsc)\n else:\n self._upload_chunk_to_boss(rmt, img, channel_rsc, x_range=self.args.x_range, y_range=self.args.y_range, z_range=self.args.z_range)\n\n url = 'https://ndwebtools.neurodata.io/ndviz_url/{}/{}/'.format(coll_name, exp_name)\n\n if group_name:\n self._change_permissions(group_name)\n\n return url", "def upload(self, task_id, task_json_path, tips_json_path, asset_json_path,\n upload_json_path, max_speed=None, transmit_type=\"upload_json\",\n engine_type=\"aspera\", server_ip=None, server_port=None,\n network_mode=0, is_record=False, redis_flag=None, redis_obj=None):\n config_file_list = [\n task_json_path,\n tips_json_path,\n asset_json_path,\n upload_json_path\n ]\n result_config = self.upload_config(task_id, config_file_list, max_speed,\n engine_type=engine_type, server_ip=server_ip, server_port=server_port,\n network_mode=network_mode)\n if not result_config:\n return False\n result_asset = self.upload_asset(upload_json_path, max_speed, transmit_type,\n engine_type=engine_type, server_ip=server_ip, server_port=server_port,\n network_mode=network_mode, is_record=is_record, redis_flag=redis_flag,\n redis_obj=redis_obj)\n if not result_asset:\n return False\n return True" ]
[ "0.6390859", "0.61168593", "0.607823", "0.59952694", "0.59203124", "0.5899646", "0.5867831", "0.5843152", "0.580407", "0.57941586", "0.5784766", "0.57775533", "0.56902266", "0.56626356", "0.56574404", "0.5635237", "0.5611183", "0.557807", "0.5567737", "0.55670696", "0.55513316", "0.55477357", "0.5542864", "0.5531265", "0.5527408", "0.55263984", "0.55195343", "0.5513915", "0.55053455", "0.5500741" ]
0.8017105
0
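The first candidate in the list above (upload_snaphot) re-encodes an image in memory with PIL and streams it over an existing SFTP session with putfo. A minimal standalone sketch of the same compress-then-upload idea follows; the host, credentials and remote_path parameters are assumptions for illustration, not part of the original snippet.

from io import BytesIO

import paramiko
import requests
from PIL import Image


def upload_compressed_snapshot(url, remote_path, host, username, password):
    # Fetch the source image and decode it without writing to disk.
    response = requests.get(url)
    image = Image.open(BytesIO(response.content))

    # Re-encode as JPEG at reduced quality into an in-memory buffer.
    buffer = BytesIO()
    image.convert("RGB").save(buffer, "JPEG", quality=60)
    buffer.seek(0)

    # Stream the buffer straight to the remote path over SFTP.
    transport = paramiko.Transport((host, 22))
    transport.connect(username=username, password=password)
    sftp = paramiko.SFTPClient.from_transport(transport)
    try:
        return sftp.putfo(buffer, remote_path, confirm=False)
    finally:
        sftp.close()
        transport.close()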
Convenience method to create a user manually.
def create_user(self): User.objects.create_user('test', '[email protected]', 'testing')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_user(self):\n return User.objects.create_user(**self.user_data)", "def create_user(self, **kwargs):\n kwargs = self._prepare_create_user_args(**kwargs)\n user = self.user_model(**kwargs)\n # noinspection PyUnresolvedReferences\n return self.save(user)", "def create_user(self, **kwargs):\n\n user = self.user_model(**self._prepare_create_user_args(**kwargs))\n return self.put(user)", "def create(self, data):\n # ensure 'create()' calls the specific 'create_user()' method\n # note that the 'data' gets validated\n user = get_user_model().objects.create_user(**data)\n return user", "def create_user(self):\n u = USER.objects.create(username='test_user1',\n email='[email protected]', )\n u.set_password('test_password')\n u.save()\n self.user = u\n return u", "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='[email protected]')[0]\n user.set_password('testabc123')\n user.save()\n\n return user", "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='[email protected]')[0]\n user.set_password('testabc123')\n user.save()\n return user", "def create_user(self):\n if not self.is_valid():\n return None\n # generate a username \n ids = User.objects.values_list('id', flat=True).order_by('-id')[:1]\n if len(ids) > 0:\n # ids[0] will be the maximum value (due to order_by: '-id')\n idnum = ids[0] + 1\n else:\n idnum = 1\n # create User object \n username = \"user%s\" % idnum\n # NOTE: store email in lower case\n email = self.clean_email().lower()\n password = self.clean_password2()\n user = User(username=username, email=email, password='tmp')\n user.save()\n # set the real password\n user.set_password(password)\n # make user inactive (until user has confirmed account)\n user.is_active = False\n # update\n user.save()\n return user", "def create_new_user():\n return get_user_model().objects.create_user(\n email='[email protected]',\n password='test@londodnjisdjfois',\n username='tempusername'\n )", "def create_form_user(self, **kwargs):\n user = User.objects.create_user(\n **kwargs\n )\n return user", "def create(self, validated_data: dict):\n return User.objects.create_user(**validated_data)", "def create_user(email='[email protected]', password='testpass123'):\n return get_user_model().objects.create_user(email=email, password=password)", "def create_user(self, *args, **kwargs):\n user = User.objects.create_user(*args, **kwargs)\n return get_profile(user)", "def create_a_user(self, username='fry', email='[email protected]', password='Qwerty!234'):\n user = User.objects.create_user(username, email, password)\n user.save()\n return user", "def create_user(user, first_name, last_name, major, bio):\n return userAccount.objects.create(user=user, first_name=first_name, last_name=last_name, major=major, bio=bio)", "def create():\n api_request = apireq.APIRequest(request, 'client_schema')\n if api_request.is_invalid():\n return api_request.error_text, 400\n return user_management.create_user(api_json['username'])", "def create_user(self, username=None, email=None, password=None):\n\t\treturn self._create_user(username, email, password)", "def create_user(self, email_or_phone, password=None, **extra_fields):\n return self._create_user(email_or_phone, password, False, False, **extra_fields)", "def sample_user(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name\"):\n return 
get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def create_user(user_name: str):\n user = User()\n user.username = user_name\n user.save()\n return user", "def create_user(self):\n return UserFactory.create()", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create_user(email, password, f_name, l_name):\n pass", "def sample_user(email: str = \"[email protected]\", password: str = \"testpass\"):\n return get_user_model().objects.create_user(email, password)" ]
[ "0.84284955", "0.8256174", "0.81976753", "0.8048271", "0.7933476", "0.7924386", "0.79241693", "0.79064405", "0.7898101", "0.78852206", "0.7818084", "0.7807617", "0.77973396", "0.7777655", "0.7771802", "0.77607566", "0.775554", "0.7746862", "0.77113354", "0.7707604", "0.7696037", "0.7681807", "0.7681807", "0.7681807", "0.7681807", "0.7681807", "0.7681807", "0.7681807", "0.7677297", "0.7652827" ]
0.8268673
1
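The create_user helper above relies on Django's create_user to hash the password before the record is saved. A minimal sketch of the same test-fixture pattern, assuming Django's bundled auth User model; the credentials below are placeholder values.

from django.contrib.auth.models import User


def create_test_user():
    # create_user hashes the password, so the raw string is never stored directly.
    user = User.objects.create_user('test', 'test@example.com', 'testing')
    assert user.check_password('testing')
    return user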
Convenience method that returns the value returned from posting /api/register with test user data
def post_user(self): return self.client.post('/api/register', {'username': 'test', 'password': 'testing'}, format='json')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_user(self):\n response = self.client.post(self.register_url, self.register_data, format='json')\n return response", "def test_register(self):\n # Register good data\n data = mock_data['register']\n data = json.dumps(data)\n response = self.client.post(\n 'api/v2/auth/signup', content_type=\"application/json\", data=data)\n data = json.loads(response.data)\n self.assertEqual(data['message'], 'User registered')\n self.assertEqual(response.status_code, 200)\n self.assertTrue('user' in data)", "def post(self):\n data = request.json\n\n register(data)\n return \"User Successfully Registered\", 200", "def user_register():\n \n data = user_obj.user_register(request.forms) \n return data", "def test_register(self):\n body = User()\n response = self.client.open(\n '/api/v1/register',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def register():\n result = register_helper(User)\n return jsonify(result[0]), result[1]", "def test_registration(self):\n\n print(\" --------------------------- Test 1 - Registration ----------------------------\")\n user_id = uuid.uuid4()\n password = \"my-precious\"\n currency = \"EUR\"\n\n response = register_user(user_id, password, currency)\n data = response.json()['message']\n self.assertEqual(response.json()['code'], 201)\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(response.headers['Content-Type'] == 'application/json')\n print(json.dumps(data, indent=4))", "def test_register_user_with_valid_data(self, app):\n data = RegisterUser.random()\n res = app.register.register(data=data, type_response=RegisterUserResponse)\n assert res.status_code == 201\n assert res.data.message == ResponseText.MESSAGE_REGISTER_USER", "def test_registration_successful(self):\n self.response = self.client.post(\n \"/api/users/\",\n {\"user\": {\n \"username\": \"kake\",\n \"email\": '[email protected]',\n \"password\": \"123445abcdefghijk\",\n }\n },\n format=\"json\"\n )\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)", "def test_register_view(self):\n url = reverse('xds_api:register')\n\n response = self.client.post(url, self.userDict)\n responseDict = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(responseDict['token'] is not None)\n self.assertTrue(responseDict['user'] is not None)", "def test_register(self):\n client = APIClient()\n payload = {\n 'email': self.email,\n 'first_name': self.first_name,\n 'last_name': self.last_name,\n 'oauth_id': self.oauth_id,\n 'password': self.password\n }\n response = client.post('/users/', payload, format='json')\n\n # Check the response status code\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n # Check the user was correctly created directly in the database\n user = User.objects.get(email=self.email)\n self.assertNotEqual(user.password, self.password)\n self.assertTrue(user.check_password(self.password))\n\n # Check the user was correctly created by doing an HTTP query\n response = client.get('/users/').json()\n self.assertEqual(len(response), 1)\n self.assertEqual(response[0]['email'], self.email)\n self.assertFalse('password' in response[0])", "def test_register(self):\n u = User(first_name = \"David\",\n last_name = 'Smith',\n password='******',\n email='[email protected]',\n phone_number='012-345-6789')\n response = self.register({\n 'first_name': u.first_name,\n 'last_name': 
u.last_name,\n 'password': u.password,\n 'email': u.email,\n 'phone_number': u.phone_number\n })\n self.assertEqual(response.status_code, 302)\n response = self.client.get(response.url)\n self.assertDictEqual(response.json(), self.client.get(reverse('backend:user_details', args=(response.json()['pk'],))).json())", "def test_signup(self):\n \n user = {\n 'firstname' : 'Caleb',\n 'lastname' : 'Mbugua',\n 'username' : 'MbuguaCaleb',\n 'email' : '[email protected]',\n 'password' : 'Calebmbugua1#',\n 'phone_number' : '0704699193'\n }\n \n\n response = self.client.post('/api/v1/register', json=user, headers={'Content-Type': 'application/json'})\n data = response.get_json()\n\n self.assertEqual(response.status_code, 201)\n self.assertEqual(data['status'], 201)\n self.assertEqual(data['message'], 'User created successfully')\n self.assertEqual(data['data']['username'], user['username'])", "def register():\n # Validate and deserialize input\n json_data = request.get_json()\n if not json_data:\n return CustomResponse(\n message=Constant.response.NO_INPUT_DATA\n ).response()\n\n user = UserService(data=json_data).create_user()\n return CustomResponse(data=user).response()", "def test_create_user(self):\n url = reverse('rest_register')\n data = {\n 'email': '[email protected]',\n 'password1': 'notshortpassword',\n 'password2': 'notshortpassword'\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(User.objects.get().email, '[email protected]')", "def register():\n data = request.get_json()\n username = data[\"username\"]\n password = data[\"password\"]\n client_data = data[\"client_data\"]\n if register_user(username, password, client_data):\n return \"1\"\n else:\n return \"0\"", "def test_registration(self):\n response = self.client_app.post(\n '/api/v1/auth/signup/',\n data=json.dumps(dict(\n last_name='james',\n email='[email protected]',\n password='123456sddfdf'\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['email'] == '[email protected]')\n self.assertTrue(data['first_name'] is None)\n self.assertTrue(data['last_name'] == 'james')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 201)", "def test_register_new_user(self):\n with self.client:\n response = self.client.post(\n url_for('register'),\n data=dict(\n first_name='Admin',\n last_name='Admin',\n email='[email protected]',\n password='admin2016',\n confirm_password='admin2016'\n ),\n follow_redirects=True\n )\n self.assertEqual(response.status_code, 200)", "def test_register_user_successfully(self):\n\n response = self.client.post(\n self.reg_url,\n self.base.reg_data,\n format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertIn(b\"successfully\", response.content)", "def test_registration(self):\n response = self.client.post(\n '/api/v1/auth/register',\n data=json.dumps(dict(\n username='joe',\n password='123456'\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Successfully registered.')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 201)", "def register():\n data = None\n response = {\n 'status': 400,\n 'error': 'Provide: firstname, lastname, email, othername, 
phone_number, and password as json.'\n }\n try:\n data = request.get_json()\n except:\n return jsonify(response), 400\n\n if not data:\n return jsonify(response), 400\n\n user_data = {\n 'firstname': data.get('firstname'),\n 'lastname': data.get('lastname'),\n 'email': data.get('email'),\n 'password': data.get('password'),\n 'othername': data.get('othername'),\n 'phone_number': data.get('phone_number'),\n 'is_admin': data.get('is_admin'),\n 'is_politician': data.get('is_politician')\n }\n valdiator_result = Validator.validate_user(user_data)\n if isinstance(valdiator_result, dict):\n return jsonify(valdiator_result), valdiator_result['status']\n if isinstance(valdiator_result, bool) and valdiator_result:\n result = politico.register_user(user_data)\n\n response = {}\n if result == 'User added':\n # return a response notifying the user that they registered successfully\n response['status'] = 201\n response['data'] = []\n response['data'].append({\n 'message': 'User registered successfully'\n })\n elif result == 'Other name taken':\n # return a response notifying the user that othername is taken\n response['status'] = 409\n response['error'] = 'The othername you chose is taken'\n elif result == 'User already exists':\n # notify the user that an account with the same email is already registered\n response['status'] = 409\n response['error'] = 'User already exists'\n return make_response(jsonify(response), response['status'])", "def test_register_user(self):\n x = client.post(REGISTER_URL, json=self.post_data1)\n\n user_expected = db_session.query(User).filter_by(username=self.post_data1[\"username\"]).first()\n self.assertEqual(x.status_code, 201)\n self.assertIsNotNone(user_expected)\n self.assertEqual(\"Barry Allen\", user_expected.username)", "def test_post_user(self):\n response = self.client.post('/api/v1/users', data = json.dumps(self.user), content_type='application/json')\n result = json.loads(response.data.decode())\n self.assertEqual(result[\"message\"], \"successful registration\", msg = \"Registration failed\")\n self.assertEqual(response.status_code, 201)", "def register():\n if request.method == 'POST':\n details = request.get_json()\n name = details.get('name')\n email = details.get('email')\n national_id = details.get('national_id')\n is_admin = details.get('admin')\n password = details.get('password')\n confirm_pwd = details.get('confirm_pwd')\n if len(name) < 4:\n return make_response(jsonify(\n {'message': 'name must be four letters or more!'}\n )), 409\n if not User.validate_email(email):\n return make_response(jsonify(\n {'message': \"Invalid email\"}\n )), 409\n if password != confirm_pwd:\n return make_response(jsonify(\n {'message': 'password mistmatch'}\n )), 400\n if len(password) < 4:\n return make_response(jsonify(\n {'message': 'password too short'}\n )), 409\n if str(national_id).isalpha():\n return make_response(jsonify(\n {'message': 'national id must be digits'}\n )), 400\n if name.isdigit():\n return make_response(jsonify(\n {'message': 'Name must be an alphabet'}\n )), 400\n user = User.query.filter_by(email=email).first()\n if user:\n return make_response(jsonify(\n {'message': 'user already registred, login'}\n )), 200\n user = User(\n name=name,\n email=email,\n national_id=national_id,\n password=password,\n is_admin=is_admin)\n user.save_user()\n auth_token = user.token_generate(user.id)\n return make_response(jsonify(\n {\n 'message': 'registration successfull',\n 'token': auth_token.decode()\n }\n )), 201\n return None", "def register():\n 
register_form = RegisterForm() # We're only getting stuff from JSON now\n if not register_form.validate():\n return jsonify({\n \"errors\": register_form.errors.items(),\n \"success\": False,\n \"user\": None,\n \"sent_json\": request.json\n })\n\n user = User.create(username=request.json['username'], password=request.json['password'])\n\n g.user = user\n\n return jsonify({\n \"errors\": [],\n \"success\": True,\n \"user\": g.user.username,\n \"sent_json\": request.json\n })", "def test_register_user(self):\n url = reverse('signup')\n data = {'email': '[email protected]', 'password': 'yayhooray'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(get_user_model().objects.count(), 1)\n self.assertEqual(get_user_model().objects.get().email, '[email protected]')", "def test_successful_registration(self):\n with self.client:\n response = register_user(\n self, 'Random', 'User', '[email protected]', 'aaaAAA111')\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] ==\n \"Account for '[email protected]' has been created.\")\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 201)", "def test_user_registration(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(1,result,\"User registration successful\")", "def register_post():\n logger.info(\"entering function register_post\")\n response = register_user(request.json)\n logger.info(\"exiting function register_post\")\n return jsonify(response)", "def create_user(self, data):\n return self.client.post(\n path='/api/v2/auth/signup/', data=json.dumps(data), content_type='application/json')" ]
[ "0.79784954", "0.76484907", "0.7586805", "0.75265926", "0.751983", "0.7486969", "0.74536526", "0.7415454", "0.73605394", "0.73265904", "0.73183036", "0.7305393", "0.727419", "0.7264769", "0.7246377", "0.72366166", "0.72164387", "0.7209809", "0.72043324", "0.7199858", "0.71688455", "0.71661836", "0.71612775", "0.7154232", "0.71410865", "0.71386683", "0.71291417", "0.7128525", "0.7124638", "0.7071614" ]
0.8221954
0
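The post_user helper above drives the registration endpoint through the test client with a JSON payload. A minimal sketch of the same call using Django REST framework's APIClient; the /api/register route and the expected 201 status are assumptions carried over from the snippets above.

from rest_framework import status
from rest_framework.test import APIClient


def post_test_registration():
    client = APIClient()
    response = client.post(
        '/api/register',
        {'username': 'test', 'password': 'testing'},
        format='json',
    )
    # The registration endpoints above generally answer a successful signup with 201.
    assert response.status_code == status.HTTP_201_CREATED
    return response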
Signs and timestamps a string so it cannot be forged. Normally used via set_secure_cookie, but provided as a separate method for non-cookie uses. To decode a value not stored as a cookie, use the optional value argument to get_secure_cookie.
def create_signed_value(self, name, value): timestamp = str(int(time.time())) value = base64.b64encode(value) signature = self._cookie_signature(name, value, timestamp) value = "|".join([value, timestamp, signature]) return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_secure_cookie( name, value=None ):", "def set_secure_cookie( name, value, **kwargs ):", "def decode_signed_value( name, value ):", "def create_signed_value( name, value ):", "def sign(self, value: str, timestamp: int = None) -> str:\n timestamp = timestamp or int(time.time())\n return self.sep.join(\n [\n self.encode_value(value).decode(),\n self.encode_int(timestamp).decode(),\n self.signature(value, timestamp),\n ]\n )", "def sign(self, content):\n cookie = base64.encodestring(\n hmac.new(self.secret, content, sha1).digest() +\n cookie_m.make_time(time.time() + 60*self.timeout) +\n content)[:-1]\n cookie = cookie.replace(\"/\", \"_\").replace(\"=\", \"~\").replace(\"\\n\", \"\")\n\n if len(cookie) > self.maxlen:\n raise cookie_m.CookieTooLarge(content, cookie)\n return cookie", "def make_secure_val(string):\n\n return \"%s|%s\" % (string, hash_str(string))", "def make_secure_val(val):\n return '%s|%s' % (val, hmac.new(secret, val).hexdigest())", "def make_secure_val(val):\n return '%s|%s' % (val, hmac.new(secret, val).hexdigest())", "def make_secure_val(val):\n return '%s|%s' % (val, hmac.new(secret, val).hexdigest())", "def serialize_to_signature(cls, value):\n if isinstance(value, bytes):\n value = value.decode('utf-8')\n\n return value", "def make_secure_val(val):\n return \"%s|%s\" % (val, hmac.new(secret, val).hexdigest())", "def encrypt_cookie_value(val):\n return '%s|%s' % (val, hmac.new(secret, val).hexdigest())", "def __sign(self, text):\n signature = HMAC.new(self.sign_key, text.encode('utf-8'), SHA256).digest()\n return base64.standard_b64encode(signature)", "def sign(self, message):\n\n # if not already a byte string turn it to making sure\n if not isinstance(message, (bytes, str)):\n return None\n elif isinstance(message, str):\n message = message.encode()\n\n hash_of_message = SHA256.new(message)\n\n signer = DSS.new(self.privkey, mode=\"fips-186-3\")\n\n digital_signature = signer.sign(hash_of_message)\n digital_signature = base64.b85encode(digital_signature).decode()\n\n return digital_signature", "def _sign_string(message, private_key_file=None, private_key_string=None):\r\n try:\r\n from M2Crypto import EVP\r\n except ImportError:\r\n raise NotImplementedError(\"Boto depends on the python M2Crypto \"\r\n \"library to generate signed URLs for \"\r\n \"CloudFront\")\r\n # Make sure only one of private_key_file and private_key_string is set\r\n if private_key_file and private_key_string:\r\n raise ValueError(\"Only specify the private_key_file or the private_key_string not both\")\r\n if not private_key_file and not private_key_string:\r\n raise ValueError(\"You must specify one of private_key_file or private_key_string\")\r\n # if private_key_file is a file object read the key string from there\r\n if isinstance(private_key_file, file):\r\n private_key_string = private_key_file.read()\r\n # Now load key and calculate signature\r\n if private_key_string:\r\n key = EVP.load_key_string(private_key_string)\r\n else:\r\n key = EVP.load_key(private_key_file)\r\n key.reset_context(md='sha1')\r\n key.sign_init()\r\n key.sign_update(str(message))\r\n signature = key.sign_final()\r\n return signature", "def decrypt_cookie_value(secure_val):\n val = secure_val.split('|')[0]\n if secure_val == encrypt_cookie_value(val):\n return val", "def set_secure_cookie(self, name, val, remember):\n\n cookie_val = make_secure_val(val)\n cookie_str = '%s=%s; Path=/;' % (name, cookie_val)\n if remember:\n expires = time.time() + 5000 * 24 * 3600 # 5000 days from now\n else:\n expires = 
time.time() + 24 * 3600\n expires_str = time.strftime(\"%a, %d-%b-%Y %T GMT\",\n time.gmtime(expires))\n expires_date = 'expires= %s;' % expires_str\n cookie_str += expires_date\n self.response.headers.add_header('Set-Cookie', cookie_str)", "def get_signed(self, sig_str):\n sig_str = base64.b64encode(sig_str)\n signature = base64.b64encode(hmac.new(self.secret, sig_str, digestmod=hashlib.sha1).digest())\n return signature", "def _sign(self, data, salt):\r\n strBuffer = \"\"\r\n # print data.keys()\r\n for k in sorted(data.iterkeys()):\r\n\r\n # Handle the BOOL special case\r\n v = data[k]\r\n if type(v) == bool:\r\n if v:\r\n v = 1\r\n else:\r\n v = 0\r\n data[k] = v\r\n\r\n # Update buffer\r\n strBuffer += \"%s=%s\\n\" % (str(k).lower(), vmcp.myquote(str(v)))\r\n\r\n # Append salt\r\n strBuffer += salt\r\n return strBuffer", "def set(self, name, value, timestamp=None, expires_days=30, **kwargs):\n \n timestamp = timestamp and timestamp or str(int(time.time()))\n value = base64.b64encode(value)\n args = (name, value, timestamp)\n signature = _generate_cookie_signature(self._cookie_secret, *args)\n value = \"|\".join([value, timestamp, signature])\n \n max_age = None\n if expires_days:\n if not isinstance(expires_days, int):\n raise TypeError(u'%s must be an `int`' % expires_days)\n max_age = expires_days * 24 * 60 * 60\n \n return self.response.set_cookie(\n name, \n value=value, \n max_age=max_age, \n **kwargs\n )", "def set_cookie(self, name, value, #signed=False,\n **options):\n\n if value is not None and \\\n not isinstance(value, (basestring, bool, int, float, long)):\n raise ValueError('Cookie value is not a valid type, ' +\n 'should be one of basestring/' +\n 'None/True/False/int/float/long'\n )\n\n value = str(value)\n\n if len(value) > 4096:\n raise ValueError('Cookie value is too long (%sb), max length is 4096' %\n len(value))\n\n self.cookies[name] = value\n\n for opt, value in options.iteritems():\n opt = opt.lower()\n\n if opt == 'expiry':\n opt = 'expires'\n\n if opt == 'expires':\n if isinstance(value, bool) or \\\n not isinstance(value, (int, float, date, datetime)):\n raise TypeError('expires value for cookie is invalid, '+\n 'should be one of datetime.date/datetime.datetime/'+\n 'int/float')\n\n if isinstance(value, (int, float)):\n value = time.gmtime(value)\n elif isinstance(value, (date, datetime)):\n value = value.timetuple()\n\n value = time.strftime(\"%a, %d %b %Y %H:%M:%S GMT\", value)\n\n if opt == 'max_age':\n if isinstance(value, int):\n pass\n elif isinstance(value, timedelta):\n value = (value.days * SECONDS_IN_DAY) + value.seconds\n else:\n raise TypeError('max_age value for cookie is invalid, '+\n 'should be one of datetime.timedelta/int')\n\n if opt == 'path':\n value = value.strip()\n if value and value[0] is not '/':\n raise TypeError('path value for cookie is invalid')\n\n self.cookies[name][opt.replace('_', '-')] = value", "def sign(self, data):\n from base64 import urlsafe_b64encode\n\n if self.sign_private == \"\":\n raise ValueError(\"Error signing: No private signing key found for {}\".format(self))\n\n key_private = RsaPrivateKey.Read(self.sign_private)\n signature = key_private.Sign(data)\n return urlsafe_b64encode(signature)", "def sign_message(self, message):\n if self.private_key:\n if isinstance(message, str):\n utf8 = message.encode('utf-8')\n else:\n raise TypeError(\"message must be a string.\")\n signature = self.private_key.sign(utf8).to_base64()\n return signature\n else:\n return None", "def make_cookie_hash(cleartext):\n return 
\"%s|%s\" % (cleartext,hashlib.sha256(SECRET + cleartext).hexdigest())", "def sign(self, payload):\n raise NotImplementedError", "def serialize_to_signature(cls, value):\n raise NotImplementedError", "def get(self, name, value=None):\n \n if value is None:\n value = self.request.cookies.get(name, None)\n \n if value is None:\n return None\n \n parts = value.split(\"|\")\n if len(parts) != 3: \n return None\n \n timestamp = int(parts[1])\n if timestamp < time.time() - 31 * 86400:\n logging.warning(\"Expired cookie %r\", value)\n return None\n \n args = (name, parts[0], parts[1])\n signature = _generate_cookie_signature(self._cookie_secret, *args)\n \n if not _time_independent_equals(parts[2], signature):\n logging.warning(\"Invalid cookie signature %r\", value)\n return None\n \n try:\n return base64.b64decode(parts[0])\n except TypeError:\n return None", "def serialize_and_sign_payload(payload):\n secret = workspace_config.secret\n serializer = URLSafeTimedSerializer(secret)\n return serializer.dumps(payload)", "def serialize_to_signature(value):\n serialization_cls = _get_serializer_for_value(value, serializing=True)\n\n if serialization_cls is None:\n raise TypeError(\n 'Unsupported type %s passed to serialize_to_signature(). '\n 'Value: %r'\n % (type(value), value))\n\n return serialization_cls.serialize_to_signature(value)" ]
[ "0.6637055", "0.6335417", "0.6251725", "0.6224434", "0.6218173", "0.61984545", "0.59713936", "0.57362914", "0.57362914", "0.57362914", "0.57331383", "0.57247233", "0.57106113", "0.5667315", "0.5567969", "0.55134684", "0.550191", "0.5480218", "0.53795123", "0.53457123", "0.5333211", "0.53193647", "0.52844715", "0.5274812", "0.51800346", "0.5150149", "0.51076806", "0.50791943", "0.5065161", "0.50362295" ]
0.6966465
0
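The create_signed_value method above base64-encodes the payload, appends a Unix timestamp, and joins both with an HMAC signature so the value cannot be forged. A minimal standalone sketch of that scheme, assuming an HMAC-SHA1 signature keyed by a cookie secret over the name, encoded value and timestamp; the secret below is a placeholder.

import base64
import hashlib
import hmac
import time

COOKIE_SECRET = 'change-me'  # placeholder; use a real secret from configuration


def create_signed_value(name, value):
    timestamp = str(int(time.time()))
    encoded = base64.b64encode(value.encode()).decode()
    mac = hmac.new(COOKIE_SECRET.encode(), digestmod=hashlib.sha1)
    for part in (name, encoded, timestamp):
        mac.update(part.encode())
    signature = mac.hexdigest()
    # Stored as "payload|timestamp|signature"; the reader re-derives the signature
    # and rejects the value when it does not match or the timestamp is too old.
    return '|'.join([encoded, timestamp, signature])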
If one client sends the correct answer, that closes the current question and sends both clients a wait delay. If the second client then ignores that and sends a timed_out, it should be ignored in favor of the new question.
def test_timed_out_after_correct_answer(self): (user, client), (user2, client2) = self._create_two_connected_clients() battle = client.current_client_battles[str(user._id)] battle.rules['no_questions'] = 2 self._create_question() self._create_question() battle.min_wait_delay -= 3 client.on_message(dict(next_question=1)) self.assertTrue(client._sent[-1]['question']) self.assertTrue(client2._sent[-1]['question']) client.on_message(dict(answer='Yes')) client2.on_message(dict(timed_out=1)) self.assertTrue(client._sent[-2]['update_scoreboard']) self.assertTrue(client2._sent[-2]['update_scoreboard']) self.assertTrue(client._sent[-1]['wait']) self.assertTrue(client2._sent[-1]['wait'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_timed_out_after_wrong_answer(self):\n (user, client), (user2, client2) = self._create_two_connected_clients()\n battle = client.current_client_battles[str(user._id)]\n battle.rules['no_questions'] = 3\n self._create_question()\n self._create_question()\n self._create_question()\n\n battle.min_wait_delay -= 3\n client.on_message(dict(next_question=1))\n self.assertTrue(client._sent[-1]['question'])\n self.assertTrue(client2._sent[-1]['question'])\n\n client.on_message(dict(answer='WRONG'))\n battle.current_question_sent -= battle.rules['thinking_time'] # fake time\n client2.on_message(dict(timed_out=1))\n self.assertTrue(client._sent[-1]['wait'])\n self.assertTrue(client2._sent[-1]['wait'])", "def test__API_with_wrong_answer(self):\n self.mock_connection.state = MockConnection.WRONG_NUM_OF_CONFIRMATIONS\n\n # timeout supposed to be here\n self.assertEqual(self.mutex.lock(), False) # acquire mutex", "def testTerminateResponseWithServerCloseIn2ndValue(self):\n self.client_connect()\n self.client_send('get someWholeVal someChoppedVal\\r\\n')\n self.mock_recv(\"get someWholeVal someChoppedVal\\r\\n\")\n self.mock_send('VALUE someWholeVal 0 10\\r\\n')\n self.mock_send('0123456789\\r\\n')\n self.mock_send('VALUE someChoppedVal 0')\n self.mock_close()\n self.client_recv('VALUE someWholeVal 0 10\\r\\n0123456789\\r\\nEND\\r\\n')", "def _wait_what(self, expected):\r\n \r\n self._msg_server(cb.WAITWHATSERVER % (expected))", "def test__API_with_correct_answers(self):\n self.mock_connection.state = MockConnection.CORRECT_NUM_OF_CONFIRMATIONS\n\n # mutex must be acquired\n self.assertEqual(self.mutex.lock(), True) # acquire mutex\n self.mutex.unlock() # release mutex", "def testTerminateResponseWithServerCloseIn2ndValueData(self):\n self.client_connect()\n self.client_send('get someWholeVal someChoppedVal\\r\\n')\n self.mock_recv(\"get someWholeVal someChoppedVal\\r\\n\")\n self.mock_send('VALUE someWholeVal 0 10\\r\\n')\n self.mock_send('0123456789\\r\\n')\n self.mock_send('VALUE someChoppedVal 0 10\\r\\n')\n self.mock_send('012345')\n self.mock_close()\n self.client_recv('VALUE someWholeVal 0 10\\r\\n0123456789\\r\\nEND\\r\\n')", "def testTerminateResponseWithServerClose(self):\n self.client_connect()\n self.client_send('set chopped 0 0 1\\r\\n')\n self.client_send('1\\r\\n')\n self.mock_recv(\"set chopped 0 0 1\\r\\n1\\r\\n\")\n self.mock_close()\n self.client_recv('.*ERROR .*\\r\\n')", "def check_answer(update: Update, context: CallbackContext):\n cleaned_text = update.message.text.strip().lower()\n cleaned_soln = context.chat_data['solution'].strip().lower()\n if cleaned_text == cleaned_soln:\n\n # Cancel current question\n chat_id = update.message.chat_id\n data = {'chat_id': chat_id,'context': context}\n chat_jobs = context.job_queue.get_jobs_by_name(str(chat_id))\n for job in chat_jobs:\n job.schedule_removal()\n\n # Update chat with answer\n name = update.message.from_user.first_name\n soln = context.chat_data['solution']\n update.message.reply_text(f'Correct! {name} got the right answer! 
It was {soln}.')\n\n # Prevent answer trigger\n context.chat_data['solution'] = ''\n\n # Update scores\n user_id = update.message.from_user.id\n if user_id not in context.chat_data['user']:\n context.chat_data['user'][user_id] = dict()\n context.chat_data['user'][user_id]['name'] = name\n context.chat_data['user'][user_id]['points'] = context.chat_data['user'][user_id].get('points', 0) + 1\n\n # Schedule run_quiz if there are more questions\n if context.chat_data['number'] < context.chat_data['total']:\n context.job_queue.run_once(run_quiz, 3, context=data, name=str(chat_id))# Delay time to next question, question answered\n\n # Schedule end_quiz if there are no more questions\n else:\n context.job_queue.run_once(end_quiz, 3, context=data, name=str(chat_id))# Delay time to end quiz, question answered", "def main_loop(self, old_answers=None, old_correct_list=None):\r\n if old_answers is None:\r\n old_answers = []\r\n if old_correct_list is None:\r\n old_correct_list = []\r\n raw_answers, is_correct_list = self.ask_question(old_answers,\r\n old_correct_list)\r\n\r\n if sum(is_correct_list) == len(self.correct_answers):\r\n user_response = self.display_correct_window()\r\n else:\r\n user_response = self.display_incorrect_window(is_correct_list)\r\n if user_response == 'Try Again':\r\n self.main_loop(raw_answers, is_correct_list)\r\n if user_response == 'Show Answers':\r\n user_response = self.show_answers(self.correct_answers,\r\n raw_answers, is_correct_list)\r\n if user_response == 'Show Solution':\r\n user_response = self.show_solution()\r\n return user_response", "async def wouldyourather(message: discord.Message, opt: options=None):\n # If there are no options, the bot will ask the questions (if there are any to choose from)\n if opt is None:\n assert message.channel.id not in sessions, \"**A would you rather session is already in progress.**\"\n sessions.add(message.channel.id)\n\n assert db.data[\"questions\"], \"**There are ZERO questions saved. 
Ask me one!**\"\n\n question = random.choice(db.data[\"questions\"])\n choices = question[\"choices\"]\n await client.say(message, \"Would you rather **{}** or **{}**?\".format(*choices))\n\n timeout = db.data[\"timeout\"]\n replied = []\n\n # Wait for replies from anyone in the channel\n while True:\n reply = await client.wait_for_message(timeout=timeout, channel=message.channel,\n check=lambda m: m.author not in replied)\n # Break on timeout\n if reply is None:\n break\n\n # Check if the choice is vlaid\n choice = get_choice(choices, reply.content)\n if choice is None:\n continue\n\n # Register that this author has replied\n replied.append(reply.author)\n\n # Update the answers in the DB\n # We don't care about multiples, just the amount (yes it will probably be biased)\n question[\"answers\"][choice] += 1\n\n name = reply.author.display_name\n response = random.choice(db.data[\"responses\"]).format(name=name, NAME=name.upper(), choice=choices[choice])\n await client.say(message, response)\n\n # Say the total tallies\n await client.say(message, \"A total of {0} would **{2}**, while {1} would **{3}**!\".format(\n *question[\"answers\"], *choices))\n db.save()\n sessions.remove(message.channel.id)\n\n # Otherwise, the member asked a question to the bot\n else:\n db.data[\"questions\"].append(dict(\n choices=list(opt),\n answers=[0, 0]\n ))\n db.save()\n\n answer = random.choice(opt)\n await client.say(message, \"**I would {}**!\".format(answer))", "def start_question(client):\r\n global num_player\r\n \r\n # Select option trap\r\n trap = randint(min_start, max_start)\r\n # Ask for the option\r\n msg_client(\"Hai \" + str(max_start) + \" scelte, scegli attentamente \", client)\r\n msg_client(\"Scrivi un numero compreso tra \" + str(min_start) + \" e \" + str(max_start) + \": \", client)\r\n msg = client.recv(BUFSIZ)\r\n # Check if is {quit}\r\n check_quit(msg, client)\r\n # Check if it's between of min number and max number\r\n answer = check_number(client, msg, min_start, max_start)\r\n # Check if he choose the trap\r\n if answer == trap:\r\n msg_client(\"Scelta sbagliata, sarà per la prossima volta! 
\", client)\r\n if num_player > 1:\r\n num_player -= 1\r\n close_client(client)\r\n else:\r\n msg_client(\"Perfetto, ora hai la possibilità di confrontarti con gli giocatori\", client)", "def test__API_without_answer(self):\n self.mock_connection.state = MockConnection.NO_CONFIRMATIONS\n\n # timeout supposed to be here\n self.assertEqual(self.mutex.lock(), False) # acquire mutex", "def handle_close(self):\r\n self.end_time = time.time()\r\n self.time_ran = self.end_time - self.start_time\r\n if self.status != 'PASS':\r\n server_log.info('Client {} aborted!'.format(self.client_id))\r\n self.status = 'ABORTED'\r\n self.close()", "def takeoff_second():\n\tglobal c2\n\tglobal a2\n\tglobal BUF_SIZE\n\tglobal state\n\n\tmsg = c2.recv(BUF_SIZE) # wait for the armed message\n\tprint a2, ' >> ', msg\n\tif msg != 'Armed':\n\t\terror(msg)\n\t\tstate = 9 # exit failure\n\telse:\n\t\tnew_msg = {}\n\t\tnew_msg['msg'] = 'TAKEOFF'\n\t\tnew_msg['arg1'] = init2[2]\n\t\tc2.send(json.dumps(new_msg))\n\t\tstate += 1", "def wait_for_response(self, response_to_get, timeout):\n for i in range(0, self._max_incorrect_responses):\n self._logger.info(\"Waiting for user response...\")\n # Save the response we were trying to get in case we need\n # to try again.\n self._last_response_to_get = response_to_get\n self._last_response_timeout = timeout\n # Wait for the specified type of response, or until the\n # specified time has elapsed.\n response, answer = self._ros_node.wait_for_response(response_to_get,\n datetime.timedelta(seconds=int(timeout)))\n\n # After waiting for a response, need to play back an\n # appropriate robot response.\n\n # If we didn't receive a response, then it was probably\n # because we didn't send a valid response to wait for.\n # This is different from a TIMEOUT since we didn't time\n # out -- we just didn't get a response of any kind.\n if not response:\n self._logger.info(\"Done waiting -- did not get valid response!\")\n return False\n\n # If we received no user response before timing out, send a\n # TIMEOUT message and pause the game.\n elif \"TIMEOUT\" in response:\n # Announce we timed out.\n self._ros_node.send_game_state(\"TIMEOUT\")\n # Pause game and wait to be told whether we should try\n # waiting again for a response or whether we should\n # skip it and move on. Queue up the pause command so the\n # main game loop can take action.\n self._game_node_queue.put(\"PAUSE\")\n # Announce the game is pausing.\n self._ros_node.send_game_state(\"PAUSE\")\n # Indicate that we did not get a response.\n # We don't break and let the user try again because the\n # external game monitor deals with TIMEOUT events, and\n # will tell us whether to try waiting again or to just\n # skip waiting for this response.\n return False\n\n # If response was INCORRECT, randomly select a robot\n # response to an incorrect user action.\n elif \"INCORRECT\" in response:\n # Record incorrect response in the db.\n self._personalization_man.record_user_response(\n self._current_question_num, self._current_question_type,\n answer)\n\n try:\n self._ros_node.send_robot_command(\"DO\",\n response=\"ROBOT_NOT_SPEAKING\",\n timeout=datetime.timedelta(seconds=int(\n self.WAIT_TIME)),\n properties=self._incorrect_responses[random.randint(0,\n len(self._incorrect_responses)-1)])\n except AttributeError:\n self._logger.exception(\"Could not play an incorrect \"\n + \"response. 
Maybe none were loaded?\")\n # Don't break so we allow the user a chance to respond\n # again.\n\n # If response was NO, randomly select a robot response to\n # the user selecting no.\n elif \"NO\" in response:\n try:\n self._ros_node.send_robot_command(\"DO\",\n response=\"ROBOT_NOT_SPEAKING\",\n timeout=datetime.timedelta(seconds=int(\n self.WAIT_TIME)),\n properties=self._no_responses[random.randint(0,\n len(self._no_responses)-1)])\n except AttributeError:\n self._logger.exception(\"Could not play a response to \"\n + \"user's NO. Maybe none were loaded?\")\n # Don't break so we allow the user a chance to respond\n # again.\n\n # If response was CORRECT, randomly select a robot response\n # to a correct user action, highlight the correct answer,\n # and break out of response loop.\n elif \"CORRECT\" in response:\n # Record correct response in the db.\n self._personalization_man.record_user_response(\n self._current_question_num, self._current_question_type,\n answer)\n try:\n self._ros_node.send_robot_command(\"DO\",\n response=\"ROBOT_NOT_SPEAKING\",\n timeout=datetime.timedelta(seconds=int(\n self.WAIT_TIME)),\n properties=self._correct_responses[random.randint(0,\n len(self._correct_responses)-1)])\n self._ros_node.send_opal_command(\"SHOW_CORRECT\")\n self._ros_node.send_robot_command(\"DO\",\n response=\"ROBOT_NOT_SPEAKING\",\n timeout=datetime.timedelta(seconds=int(\n self.WAIT_TIME)),\n properties=self._answer_feedback[random.randint(0,\n len(self._answer_feedback)-1)])\n # Pause after speaking before hiding correct again\n time.sleep(self.ANSWER_FEEDBACK_PAUSE_TIME)\n self._ros_node.send_opal_command(\"HIDE_CORRECT\")\n except AttributeError:\n self._logger.exception(\"Could not play a correct \"\n + \"response or could not play robot's answer\"\n + \" feedback. Maybe none were loaded?\")\n # Break from the for loop so we don't give the user\n # a chance to respond again.\n break\n\n # If response was START, randomly select a robot response to\n # the user selecting START, and break out of response loop.\n elif \"START\" in response:\n try:\n self._ros_node.send_robot_command(\"DO\",\n response=\"ROBOT_NOT_SPEAKING\",\n timeout=datetime.timedelta(seconds=int(\n self.WAIT_TIME)),\n properties=self._start_responses[random.randint(0,\n len(self._start_responses)-1)])\n except AttributeError:\n self._logger.exception(\"Could not play response to\"\n + \"user's START. Maybe none were loaded?\")\n # Break from the for loop so we don't give the user\n # a chance to respond again.\n break\n\n # We exhausted our allowed number of user responses, so have\n # the robot do something instead of waiting more.\n else:\n # If user was never correct, play robot's correct answer\n # feedback and show which answer was correct in the game.\n if \"CORRECT\" in response_to_get:\n try:\n self._ros_node.send_opal_command(\"SHOW_CORRECT\")\n self._ros_node.send_robot_command(\"DO\",\n response=\"ROBOT_NOT_SPEAKING\",\n timeout=datetime.timedelta(seconds=int(\n self.WAIT_TIME)),\n properties=self._answer_feedback[random.randint(0,\n len(self._answer_feedback)-1)])\n # Pause after speaking before hiding correct again.\n time.sleep(self.ANSWER_FEEDBACK_PAUSE_TIME)\n self._ros_node.send_opal_command(\"HIDE_CORRECT\")\n except AttributeError:\n self._logger.exception(\"Could not play robot's answer\"\n + \" feedback! 
Maybe none were loaded?\")\n\n # If user never selects START (which is used to ask the user\n # if they are ready to play), stop all stories and repeating\n # scripts, continue with main script so we go to the end.\n elif \"START\" in response_to_get:\n self._repeating = False\n self.story = False\n\n # We got a user response and responded to it!\n return True", "def test_timeoutReset(self):\n for i in range(3):\n self.circuit_breaker.failure()\n self.time.advance(29.0)\n available29sec = self.circuit_breaker.available()\n self.time.advance(1.1)\n available30sec = self.circuit_breaker.available()\n self.assertEqual((available29sec, available30sec),\n (False, True))", "def new_client(self, cl_socket, addr):\n while True: # until again=='no'\n g = Game(questions.copy(), cl_socket) # create new game\n g.play() # start paly\n time.sleep(1) # wait after game is done, to prevent issues of sending multiple msgs\n cl_socket.send(GAME_DONE.encode()) # tell client game is over\n again = cl_socket.recv(MAX_INPUT).decode() # get client answer for another game\n if again.lower() == 'no': # if no - thread is finish\n break\n\n # close socket, print info\n cl_socket.close()\n print(f\"[DISCONNECTION] {addr} disconnected\")\n print(f\"[ACTIVE CONNECTIONS] {threading.activeCount() - 2}\")", "def Wait(p_question: str):\n input(p_question)\n return", "def process_open_ended_question_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\n\t\tprevious_message = message.previous_message\n\t\twhile hasattr(previous_message, \"previous_message\") and previous_message.previous_message != None:\n\t\t\tprevious_message = previous_message.previous_message\n\n\t\tfor feedback in previous_message.feedbacks.all():\n\t\t\tfeedback.note=response\n\t\t\tfeedback.datetime_responded=now\n\t\t\tfeedback.save()\n\n\t\ttemplate = 'messages/response_open_ended_question.txt'\n\t\tcontent = render_to_string(template)\n\t\tnew_m = Message.objects.create(to=sender, _type=Message.STATIC_ONE_OFF, content=content)\n\t\treturn HttpResponse(content=content, content_type='text/plain')", "def play_again(p1, p2):\n p1_again = recv_msg(p1)\n p2_again = recv_msg(p2)\n\n send_msg(p1, p2_again)\n send_msg(p2, p1_again)\n\n return p1_again == PLAY_AGAIN_TRUE and p2_again == PLAY_AGAIN_TRUE", "async def _ask(self, ctx, question: _QuestionData):\n check = BaseLock(ctx, lock=self.lock)\n msg_text = question.question if not question.failed else question.parse_fail_response\n msg_embed = question.embed if not question.failed else question.parse_fail_embed\n self._messages.append(await ctx.send(content=msg_text, embed=msg_embed))\n answer_msg = await ctx.bot.wait_for('message', check=check, timeout=self.timeout)\n self._messages.append(answer_msg)\n if answer_msg.content.lower().strip() == self.stop:\n return False\n if question.parser:\n try:\n if isinstance(question.parser, Converter):\n answer = await question.parser.convert(ctx, answer_msg.content)\n elif isinstance(question.parser, type) and issubclass(question.parser, Converter):\n answer = await question.parser().convert(ctx, answer_msg.content)\n else:\n answer = question.parser(answer_msg.content)\n if isawaitable(question.parser):\n answer = await answer\n except Exception as e:\n question.failed = True\n question.parse_fail_response = (question.parse_fail_response.format(answer_msg.content)\n if question.parse_fail_response else None)\n raise UserAnswerParsingError(f\"Failed to parse {question}\") from e\n 
else:\n answer = answer_msg.content\n return answer", "def play(self):\n prize = 0\n # part 1: 3 questions of 5000 NIS each\n line = 'Welcome to the first part!\\n' + '3 questions of 5000 NIS start NOW\\n'\n self.client.send((NO_RESPONSE + line).encode())\n for i in range(3): # 3 questions\n q = self.get_question() # get random question from stock\n line = str(q) + '\\nChoose your answer (1-4): '\n self.client.send((ASK_RESPONSE + line).encode())\n answer = int(self.client.recv(MAX_INPUT).decode()) # get client answer\n # check answer and update prize\n if answer == q.get_answer():\n line = 'Well Done! you are right!\\n'\n self.client.send((NO_RESPONSE + line).encode())\n prize += 5000\n else:\n line = 'You are wrong! Maybe next time!\\n'\n self.client.send((NO_RESPONSE + line).encode())\n\n # part 2: choose where to start\n line = ('Welcome to the second part!\\n' + 'You have ' + str(prize) + ' NIS for now\\n' +\n 'You can stay with it but you also can...\\n' +\n '1. step back: compete for ' + str(prize * 2) + ' NIS and start 2 steps from the chaser\\n' +\n '2. stay: compete for ' + str(prize) + ' NIS and start 3 steps from the chaser\\n' +\n '3. step ahead: compete for ' + str(prize // 2) + ' NIS and start 4 steps from the chaser\\n' +\n 'Choose an option (1-3): \\n')\n self.client.send((ASK_RESPONSE + line).encode())\n answer = int(self.client.recv(MAX_INPUT).decode())\n prize *= 2 if answer == 1 else 1/2 if answer == 3 else 1 # update prize (*1 or *1/2 or *2)\n prize = int(prize) # and not float\n self.b = Board(answer) # initialize board\n line = '--One time you can type \\'help\\' and disable 2 answers--\\n'\n self.client.send((NO_RESPONSE + line).encode())\n\n # part 2: let the chaser chase!\n for i in range(12): # 12 questions left\n self.client.send((NO_RESPONSE + str(self.b).encode()) # send board\n q = self.get_question() # get random question from stock\n chaser_answer = self.get_chaser_answer(q) # get chaser answer (75% right)\n line = str(q) + '\\nChoose your answer (1-4): '\n self.client.send((ASK_RESPONSE + line).encode())\n\n # get client answer: int (1/2/3/4) -or- 'help'\n while True: # until client choose answer (1/2/3/4)\n player_answer = self.client.recv(MAX_INPUT).decode() # get answer\n if player_answer == 'help':\n if self.there_is_help:\n self.get_help(q) # send 2 option instead of 4\n self.there_is_help = False # update flag\n line = '\\nChoose your answer (1-4): ' # ask for new answer\n self.client.send((ASK_RESPONSE + line).encode())\n continue\n else: # client already used his help, ask for an answer\n line = 'You already used it!\\n' + 'Choose your answer (1-4): '\n self.client.send((ASK_RESPONSE + line).encode())\n continue\n # else: answer is 1/2/3/4\n break\n\n # update board, check if the game end (win/lose)\n self.update_board(int(player_answer), chaser_answer, q.get_answer())\n win_lose = self.check_win_lose()\n if win_lose == 1: # win\n line = 'Well Done! You Win ' + str(prize) + ' NIS!'\n self.client.send((NO_RESPONSE + line).encode())\n return\n elif win_lose == 2: # lose\n line = 'Oh No! You Lose! 
Maybe Next Time...'\n self.client.send((NO_RESPONSE + line).encode())\n return", "def reply_request(question: str, reply_options = ['y', 'yes', 'n', 'no'], show_reply = False):\n\n reply = None \n while not reply in reply_options:\n reply = input(question).lower()\n else:\n if show_reply:\n print(f'Your choice: {reply}')\n \n if reply in ['yes', 'no']:\n return reply[0]\n else:\n return reply", "def get_help(self, q):\n real_answer = q.get_answer() # first option is the right one\n options = [1, 2, 3, 4] # second option is random out of 4\n options.pop(real_answer-1) # pop right option\n second_option = options[random.randint(0, 2)] # get random wrong option\n for i in range(4): # send the 2 options to the client\n if (i+1) in (real_answer, second_option):\n line = str(i+1) + '. ' + q.get_option(i)\n self.client.send((NO_RESPONSE + line).encode())", "def test__API_other_locks_later(self):\n self.mock_connection.state = MockConnection.CORRECT_NUM_OF_CONFIRMATIONS\n\n # simulate release after 1.5s\n delay = 1.5\n thread = Thread(target=self.mock_connection.simulate_release_from_other_process, args=(1, delay))\n thread.start()\n\n start = time()\n self.mutex.lock()\n\n # simulate request\n self.mock_connection.simulate_request_from_other_process(10) # later than our time\n\n # locked before delay passed=> OK\n self.assertLess(time(), start + delay)\n\n self.mutex.unlock()", "def _assert_message_is_ask_response(\n self,\n message: W24TechreadMessage,\n ask_type: W24AskType,\n ) -> None:\n self._check_request_id(message)\n self._check_message_type(\n message,\n W24TechreadMessageType.ASK,\n ask_type)", "def test_multiple_users_same_survey(self):\n input_data = 'foo bar'\n\n # user A inputs an answer\n self.launchSurvey(self.client_a, 'test', 'textfield')\n self.post(self.client_a, {'name-answer': input_data})\n\n # user B gets taken straight to summary as survey is complete\n self.launchSurvey(self.client_b, 'test', 'textfield')\n last_url_b = self.cache[self.client_b]['last_url']\n self.assertIn('/questionnaire/test/textfield/789/group/0/summary', last_url_b)\n\n # user B manually navigates to answer and can view the value that user A entered\n self.get(self.client_b, '/questionnaire/test/textfield/789/group/0/block')\n last_response_b = self.cache[self.client_b]['last_response']\n self.assertEqual(last_response_b.status_code, 200)\n self.assertIn(input_data, last_response_b.get_data(True))\n\n # user A continues through playback page and submits\n self.post(self.client_a, {})\n self.post(self.client_a, action=None)\n\n # user B tries to submit value\n self.post(self.client_b, {'name-answer': 'bar baz'})\n last_response_b = self.cache[self.client_b]['last_response']\n self.assertEqual(last_response_b.status_code, 401)", "def wait_for_recv_response(self, client):\r\n for i in range(int(client.timeout/TIMEOUT_CHECK)-1):\r\n sleep(TIMEOUT_CHECK)\r\n with client.incoming_data_lock:\r\n if client.waiting_for_response == 0:\r\n return\r\n sleep(TIMEOUT_CHECK)\r\n with client.incoming_data_lock:\r\n if client.waiting_for_response != 0:\r\n client.waiting_for_response = 0\r\n self.send_ipc(client.socket, self.serializer.RESULT_SUCCESS, SERIALIZER_CMD.RECV_EMPTY, [True])", "def respond(self, reply=None, channel=ODKAccess.choice_name(), answers_context={}):\n qset = self.question_set.to_exact\n if self.is_closed():\n return\n next_question = None\n if self.has_started and reply:\n # save reply\n answer_class = Answer.get_class(self.last_question.answer_type)\n answer_class.create(self, 
self.last_question, reply)\n # compute nnext\n next_question = self.last_question.next_question(reply)\n elif self.has_started is False:\n self.last_question = qset.g_first_question\n self.save()\n reply = None # ignore the initial message\n if self.has_started and reply is None:\n return self.last_question.display_text(channel=channel, context=answers_context)\n # now confirm the question is applicable\n if next_question and AnswerAccessDefinition.is_valid(channel, next_question.answer_type) is False:\n # if not get next line question\n next_question = qset.next_inline(self.last_question, channel=channel)\n response_text = None\n if next_question:\n self.last_question = next_question\n response_text = self.last_question.display_text(channel=channel, context=answers_context)\n else:\n self.closure_date = timezone.now()\n self.save()\n return response_text", "def wait_for_reply(timeout=3):\n i = 0\n reply = Networking.get_instance().client.get_server_reply()\n while not reply:\n reply = Networking.get_instance().client.get_server_reply()\n time.sleep(1)\n i += 1\n if i > timeout:\n raise TimeoutError\n return reply" ]
[ "0.7495978", "0.6053929", "0.5879573", "0.57639295", "0.574705", "0.5609191", "0.5534354", "0.55327255", "0.5498602", "0.54945", "0.54879904", "0.5482634", "0.5468472", "0.5459262", "0.544764", "0.5429914", "0.5415597", "0.53722477", "0.5348782", "0.53448504", "0.5331468", "0.5329378", "0.53164715", "0.5308309", "0.53015053", "0.52981573", "0.5288834", "0.5283643", "0.52737075", "0.525733" ]
0.7941565
0
If one client sends the correct answer, that will close the current question and send both of them a wait delay. If the second client ignores that and sends a timed_out, the timed_out should be ignored in favor of the new question
def test_timed_out_after_wrong_answer(self):
    (user, client), (user2, client2) = self._create_two_connected_clients()
    battle = client.current_client_battles[str(user._id)]
    battle.rules['no_questions'] = 3
    self._create_question()
    self._create_question()
    self._create_question()

    battle.min_wait_delay -= 3
    client.on_message(dict(next_question=1))
    self.assertTrue(client._sent[-1]['question'])
    self.assertTrue(client2._sent[-1]['question'])

    client.on_message(dict(answer='WRONG'))
    battle.current_question_sent -= battle.rules['thinking_time']  # fake time
    client2.on_message(dict(timed_out=1))

    self.assertTrue(client._sent[-1]['wait'])
    self.assertTrue(client2._sent[-1]['wait'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_timed_out_after_correct_answer(self):\n (user, client), (user2, client2) = self._create_two_connected_clients()\n battle = client.current_client_battles[str(user._id)]\n battle.rules['no_questions'] = 2\n self._create_question()\n self._create_question()\n\n battle.min_wait_delay -= 3\n client.on_message(dict(next_question=1))\n self.assertTrue(client._sent[-1]['question'])\n self.assertTrue(client2._sent[-1]['question'])\n\n client.on_message(dict(answer='Yes'))\n client2.on_message(dict(timed_out=1))\n\n self.assertTrue(client._sent[-2]['update_scoreboard'])\n self.assertTrue(client2._sent[-2]['update_scoreboard'])\n\n self.assertTrue(client._sent[-1]['wait'])\n self.assertTrue(client2._sent[-1]['wait'])", "def test__API_with_wrong_answer(self):\n self.mock_connection.state = MockConnection.WRONG_NUM_OF_CONFIRMATIONS\n\n # timeout supposed to be here\n self.assertEqual(self.mutex.lock(), False) # acquire mutex", "def testTerminateResponseWithServerCloseIn2ndValue(self):\n self.client_connect()\n self.client_send('get someWholeVal someChoppedVal\\r\\n')\n self.mock_recv(\"get someWholeVal someChoppedVal\\r\\n\")\n self.mock_send('VALUE someWholeVal 0 10\\r\\n')\n self.mock_send('0123456789\\r\\n')\n self.mock_send('VALUE someChoppedVal 0')\n self.mock_close()\n self.client_recv('VALUE someWholeVal 0 10\\r\\n0123456789\\r\\nEND\\r\\n')", "def _wait_what(self, expected):\r\n \r\n self._msg_server(cb.WAITWHATSERVER % (expected))", "def test__API_with_correct_answers(self):\n self.mock_connection.state = MockConnection.CORRECT_NUM_OF_CONFIRMATIONS\n\n # mutex must be acquired\n self.assertEqual(self.mutex.lock(), True) # acquire mutex\n self.mutex.unlock() # release mutex", "def testTerminateResponseWithServerCloseIn2ndValueData(self):\n self.client_connect()\n self.client_send('get someWholeVal someChoppedVal\\r\\n')\n self.mock_recv(\"get someWholeVal someChoppedVal\\r\\n\")\n self.mock_send('VALUE someWholeVal 0 10\\r\\n')\n self.mock_send('0123456789\\r\\n')\n self.mock_send('VALUE someChoppedVal 0 10\\r\\n')\n self.mock_send('012345')\n self.mock_close()\n self.client_recv('VALUE someWholeVal 0 10\\r\\n0123456789\\r\\nEND\\r\\n')", "def check_answer(update: Update, context: CallbackContext):\n cleaned_text = update.message.text.strip().lower()\n cleaned_soln = context.chat_data['solution'].strip().lower()\n if cleaned_text == cleaned_soln:\n\n # Cancel current question\n chat_id = update.message.chat_id\n data = {'chat_id': chat_id,'context': context}\n chat_jobs = context.job_queue.get_jobs_by_name(str(chat_id))\n for job in chat_jobs:\n job.schedule_removal()\n\n # Update chat with answer\n name = update.message.from_user.first_name\n soln = context.chat_data['solution']\n update.message.reply_text(f'Correct! {name} got the right answer! 
It was {soln}.')\n\n # Prevent answer trigger\n context.chat_data['solution'] = ''\n\n # Update scores\n user_id = update.message.from_user.id\n if user_id not in context.chat_data['user']:\n context.chat_data['user'][user_id] = dict()\n context.chat_data['user'][user_id]['name'] = name\n context.chat_data['user'][user_id]['points'] = context.chat_data['user'][user_id].get('points', 0) + 1\n\n # Schedule run_quiz if there are more questions\n if context.chat_data['number'] < context.chat_data['total']:\n context.job_queue.run_once(run_quiz, 3, context=data, name=str(chat_id))# Delay time to next question, question answered\n\n # Schedule end_quiz if there are no more questions\n else:\n context.job_queue.run_once(end_quiz, 3, context=data, name=str(chat_id))# Delay time to end quiz, question answered", "def testTerminateResponseWithServerClose(self):\n self.client_connect()\n self.client_send('set chopped 0 0 1\\r\\n')\n self.client_send('1\\r\\n')\n self.mock_recv(\"set chopped 0 0 1\\r\\n1\\r\\n\")\n self.mock_close()\n self.client_recv('.*ERROR .*\\r\\n')", "def main_loop(self, old_answers=None, old_correct_list=None):\r\n if old_answers is None:\r\n old_answers = []\r\n if old_correct_list is None:\r\n old_correct_list = []\r\n raw_answers, is_correct_list = self.ask_question(old_answers,\r\n old_correct_list)\r\n\r\n if sum(is_correct_list) == len(self.correct_answers):\r\n user_response = self.display_correct_window()\r\n else:\r\n user_response = self.display_incorrect_window(is_correct_list)\r\n if user_response == 'Try Again':\r\n self.main_loop(raw_answers, is_correct_list)\r\n if user_response == 'Show Answers':\r\n user_response = self.show_answers(self.correct_answers,\r\n raw_answers, is_correct_list)\r\n if user_response == 'Show Solution':\r\n user_response = self.show_solution()\r\n return user_response", "async def wouldyourather(message: discord.Message, opt: options=None):\n # If there are no options, the bot will ask the questions (if there are any to choose from)\n if opt is None:\n assert message.channel.id not in sessions, \"**A would you rather session is already in progress.**\"\n sessions.add(message.channel.id)\n\n assert db.data[\"questions\"], \"**There are ZERO questions saved. 
Ask me one!**\"\n\n question = random.choice(db.data[\"questions\"])\n choices = question[\"choices\"]\n await client.say(message, \"Would you rather **{}** or **{}**?\".format(*choices))\n\n timeout = db.data[\"timeout\"]\n replied = []\n\n # Wait for replies from anyone in the channel\n while True:\n reply = await client.wait_for_message(timeout=timeout, channel=message.channel,\n check=lambda m: m.author not in replied)\n # Break on timeout\n if reply is None:\n break\n\n # Check if the choice is vlaid\n choice = get_choice(choices, reply.content)\n if choice is None:\n continue\n\n # Register that this author has replied\n replied.append(reply.author)\n\n # Update the answers in the DB\n # We don't care about multiples, just the amount (yes it will probably be biased)\n question[\"answers\"][choice] += 1\n\n name = reply.author.display_name\n response = random.choice(db.data[\"responses\"]).format(name=name, NAME=name.upper(), choice=choices[choice])\n await client.say(message, response)\n\n # Say the total tallies\n await client.say(message, \"A total of {0} would **{2}**, while {1} would **{3}**!\".format(\n *question[\"answers\"], *choices))\n db.save()\n sessions.remove(message.channel.id)\n\n # Otherwise, the member asked a question to the bot\n else:\n db.data[\"questions\"].append(dict(\n choices=list(opt),\n answers=[0, 0]\n ))\n db.save()\n\n answer = random.choice(opt)\n await client.say(message, \"**I would {}**!\".format(answer))", "def start_question(client):\r\n global num_player\r\n \r\n # Select option trap\r\n trap = randint(min_start, max_start)\r\n # Ask for the option\r\n msg_client(\"Hai \" + str(max_start) + \" scelte, scegli attentamente \", client)\r\n msg_client(\"Scrivi un numero compreso tra \" + str(min_start) + \" e \" + str(max_start) + \": \", client)\r\n msg = client.recv(BUFSIZ)\r\n # Check if is {quit}\r\n check_quit(msg, client)\r\n # Check if it's between of min number and max number\r\n answer = check_number(client, msg, min_start, max_start)\r\n # Check if he choose the trap\r\n if answer == trap:\r\n msg_client(\"Scelta sbagliata, sarà per la prossima volta! 
\", client)\r\n if num_player > 1:\r\n num_player -= 1\r\n close_client(client)\r\n else:\r\n msg_client(\"Perfetto, ora hai la possibilità di confrontarti con gli giocatori\", client)", "def test__API_without_answer(self):\n self.mock_connection.state = MockConnection.NO_CONFIRMATIONS\n\n # timeout supposed to be here\n self.assertEqual(self.mutex.lock(), False) # acquire mutex", "def handle_close(self):\r\n self.end_time = time.time()\r\n self.time_ran = self.end_time - self.start_time\r\n if self.status != 'PASS':\r\n server_log.info('Client {} aborted!'.format(self.client_id))\r\n self.status = 'ABORTED'\r\n self.close()", "def takeoff_second():\n\tglobal c2\n\tglobal a2\n\tglobal BUF_SIZE\n\tglobal state\n\n\tmsg = c2.recv(BUF_SIZE) # wait for the armed message\n\tprint a2, ' >> ', msg\n\tif msg != 'Armed':\n\t\terror(msg)\n\t\tstate = 9 # exit failure\n\telse:\n\t\tnew_msg = {}\n\t\tnew_msg['msg'] = 'TAKEOFF'\n\t\tnew_msg['arg1'] = init2[2]\n\t\tc2.send(json.dumps(new_msg))\n\t\tstate += 1", "def wait_for_response(self, response_to_get, timeout):\n for i in range(0, self._max_incorrect_responses):\n self._logger.info(\"Waiting for user response...\")\n # Save the response we were trying to get in case we need\n # to try again.\n self._last_response_to_get = response_to_get\n self._last_response_timeout = timeout\n # Wait for the specified type of response, or until the\n # specified time has elapsed.\n response, answer = self._ros_node.wait_for_response(response_to_get,\n datetime.timedelta(seconds=int(timeout)))\n\n # After waiting for a response, need to play back an\n # appropriate robot response.\n\n # If we didn't receive a response, then it was probably\n # because we didn't send a valid response to wait for.\n # This is different from a TIMEOUT since we didn't time\n # out -- we just didn't get a response of any kind.\n if not response:\n self._logger.info(\"Done waiting -- did not get valid response!\")\n return False\n\n # If we received no user response before timing out, send a\n # TIMEOUT message and pause the game.\n elif \"TIMEOUT\" in response:\n # Announce we timed out.\n self._ros_node.send_game_state(\"TIMEOUT\")\n # Pause game and wait to be told whether we should try\n # waiting again for a response or whether we should\n # skip it and move on. Queue up the pause command so the\n # main game loop can take action.\n self._game_node_queue.put(\"PAUSE\")\n # Announce the game is pausing.\n self._ros_node.send_game_state(\"PAUSE\")\n # Indicate that we did not get a response.\n # We don't break and let the user try again because the\n # external game monitor deals with TIMEOUT events, and\n # will tell us whether to try waiting again or to just\n # skip waiting for this response.\n return False\n\n # If response was INCORRECT, randomly select a robot\n # response to an incorrect user action.\n elif \"INCORRECT\" in response:\n # Record incorrect response in the db.\n self._personalization_man.record_user_response(\n self._current_question_num, self._current_question_type,\n answer)\n\n try:\n self._ros_node.send_robot_command(\"DO\",\n response=\"ROBOT_NOT_SPEAKING\",\n timeout=datetime.timedelta(seconds=int(\n self.WAIT_TIME)),\n properties=self._incorrect_responses[random.randint(0,\n len(self._incorrect_responses)-1)])\n except AttributeError:\n self._logger.exception(\"Could not play an incorrect \"\n + \"response. 
Maybe none were loaded?\")\n # Don't break so we allow the user a chance to respond\n # again.\n\n # If response was NO, randomly select a robot response to\n # the user selecting no.\n elif \"NO\" in response:\n try:\n self._ros_node.send_robot_command(\"DO\",\n response=\"ROBOT_NOT_SPEAKING\",\n timeout=datetime.timedelta(seconds=int(\n self.WAIT_TIME)),\n properties=self._no_responses[random.randint(0,\n len(self._no_responses)-1)])\n except AttributeError:\n self._logger.exception(\"Could not play a response to \"\n + \"user's NO. Maybe none were loaded?\")\n # Don't break so we allow the user a chance to respond\n # again.\n\n # If response was CORRECT, randomly select a robot response\n # to a correct user action, highlight the correct answer,\n # and break out of response loop.\n elif \"CORRECT\" in response:\n # Record correct response in the db.\n self._personalization_man.record_user_response(\n self._current_question_num, self._current_question_type,\n answer)\n try:\n self._ros_node.send_robot_command(\"DO\",\n response=\"ROBOT_NOT_SPEAKING\",\n timeout=datetime.timedelta(seconds=int(\n self.WAIT_TIME)),\n properties=self._correct_responses[random.randint(0,\n len(self._correct_responses)-1)])\n self._ros_node.send_opal_command(\"SHOW_CORRECT\")\n self._ros_node.send_robot_command(\"DO\",\n response=\"ROBOT_NOT_SPEAKING\",\n timeout=datetime.timedelta(seconds=int(\n self.WAIT_TIME)),\n properties=self._answer_feedback[random.randint(0,\n len(self._answer_feedback)-1)])\n # Pause after speaking before hiding correct again\n time.sleep(self.ANSWER_FEEDBACK_PAUSE_TIME)\n self._ros_node.send_opal_command(\"HIDE_CORRECT\")\n except AttributeError:\n self._logger.exception(\"Could not play a correct \"\n + \"response or could not play robot's answer\"\n + \" feedback. Maybe none were loaded?\")\n # Break from the for loop so we don't give the user\n # a chance to respond again.\n break\n\n # If response was START, randomly select a robot response to\n # the user selecting START, and break out of response loop.\n elif \"START\" in response:\n try:\n self._ros_node.send_robot_command(\"DO\",\n response=\"ROBOT_NOT_SPEAKING\",\n timeout=datetime.timedelta(seconds=int(\n self.WAIT_TIME)),\n properties=self._start_responses[random.randint(0,\n len(self._start_responses)-1)])\n except AttributeError:\n self._logger.exception(\"Could not play response to\"\n + \"user's START. Maybe none were loaded?\")\n # Break from the for loop so we don't give the user\n # a chance to respond again.\n break\n\n # We exhausted our allowed number of user responses, so have\n # the robot do something instead of waiting more.\n else:\n # If user was never correct, play robot's correct answer\n # feedback and show which answer was correct in the game.\n if \"CORRECT\" in response_to_get:\n try:\n self._ros_node.send_opal_command(\"SHOW_CORRECT\")\n self._ros_node.send_robot_command(\"DO\",\n response=\"ROBOT_NOT_SPEAKING\",\n timeout=datetime.timedelta(seconds=int(\n self.WAIT_TIME)),\n properties=self._answer_feedback[random.randint(0,\n len(self._answer_feedback)-1)])\n # Pause after speaking before hiding correct again.\n time.sleep(self.ANSWER_FEEDBACK_PAUSE_TIME)\n self._ros_node.send_opal_command(\"HIDE_CORRECT\")\n except AttributeError:\n self._logger.exception(\"Could not play robot's answer\"\n + \" feedback! 
Maybe none were loaded?\")\n\n # If user never selects START (which is used to ask the user\n # if they are ready to play), stop all stories and repeating\n # scripts, continue with main script so we go to the end.\n elif \"START\" in response_to_get:\n self._repeating = False\n self.story = False\n\n # We got a user response and responded to it!\n return True", "def test_timeoutReset(self):\n for i in range(3):\n self.circuit_breaker.failure()\n self.time.advance(29.0)\n available29sec = self.circuit_breaker.available()\n self.time.advance(1.1)\n available30sec = self.circuit_breaker.available()\n self.assertEqual((available29sec, available30sec),\n (False, True))", "def new_client(self, cl_socket, addr):\n while True: # until again=='no'\n g = Game(questions.copy(), cl_socket) # create new game\n g.play() # start paly\n time.sleep(1) # wait after game is done, to prevent issues of sending multiple msgs\n cl_socket.send(GAME_DONE.encode()) # tell client game is over\n again = cl_socket.recv(MAX_INPUT).decode() # get client answer for another game\n if again.lower() == 'no': # if no - thread is finish\n break\n\n # close socket, print info\n cl_socket.close()\n print(f\"[DISCONNECTION] {addr} disconnected\")\n print(f\"[ACTIVE CONNECTIONS] {threading.activeCount() - 2}\")", "def Wait(p_question: str):\n input(p_question)\n return", "def process_open_ended_question_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\n\t\tprevious_message = message.previous_message\n\t\twhile hasattr(previous_message, \"previous_message\") and previous_message.previous_message != None:\n\t\t\tprevious_message = previous_message.previous_message\n\n\t\tfor feedback in previous_message.feedbacks.all():\n\t\t\tfeedback.note=response\n\t\t\tfeedback.datetime_responded=now\n\t\t\tfeedback.save()\n\n\t\ttemplate = 'messages/response_open_ended_question.txt'\n\t\tcontent = render_to_string(template)\n\t\tnew_m = Message.objects.create(to=sender, _type=Message.STATIC_ONE_OFF, content=content)\n\t\treturn HttpResponse(content=content, content_type='text/plain')", "def play_again(p1, p2):\n p1_again = recv_msg(p1)\n p2_again = recv_msg(p2)\n\n send_msg(p1, p2_again)\n send_msg(p2, p1_again)\n\n return p1_again == PLAY_AGAIN_TRUE and p2_again == PLAY_AGAIN_TRUE", "async def _ask(self, ctx, question: _QuestionData):\n check = BaseLock(ctx, lock=self.lock)\n msg_text = question.question if not question.failed else question.parse_fail_response\n msg_embed = question.embed if not question.failed else question.parse_fail_embed\n self._messages.append(await ctx.send(content=msg_text, embed=msg_embed))\n answer_msg = await ctx.bot.wait_for('message', check=check, timeout=self.timeout)\n self._messages.append(answer_msg)\n if answer_msg.content.lower().strip() == self.stop:\n return False\n if question.parser:\n try:\n if isinstance(question.parser, Converter):\n answer = await question.parser.convert(ctx, answer_msg.content)\n elif isinstance(question.parser, type) and issubclass(question.parser, Converter):\n answer = await question.parser().convert(ctx, answer_msg.content)\n else:\n answer = question.parser(answer_msg.content)\n if isawaitable(question.parser):\n answer = await answer\n except Exception as e:\n question.failed = True\n question.parse_fail_response = (question.parse_fail_response.format(answer_msg.content)\n if question.parse_fail_response else None)\n raise UserAnswerParsingError(f\"Failed to parse {question}\") from e\n 
else:\n answer = answer_msg.content\n return answer", "def play(self):\n prize = 0\n # part 1: 3 questions of 5000 NIS each\n line = 'Welcome to the first part!\\n' + '3 questions of 5000 NIS start NOW\\n'\n self.client.send((NO_RESPONSE + line).encode())\n for i in range(3): # 3 questions\n q = self.get_question() # get random question from stock\n line = str(q) + '\\nChoose your answer (1-4): '\n self.client.send((ASK_RESPONSE + line).encode())\n answer = int(self.client.recv(MAX_INPUT).decode()) # get client answer\n # check answer and update prize\n if answer == q.get_answer():\n line = 'Well Done! you are right!\\n'\n self.client.send((NO_RESPONSE + line).encode())\n prize += 5000\n else:\n line = 'You are wrong! Maybe next time!\\n'\n self.client.send((NO_RESPONSE + line).encode())\n\n # part 2: choose where to start\n line = ('Welcome to the second part!\\n' + 'You have ' + str(prize) + ' NIS for now\\n' +\n 'You can stay with it but you also can...\\n' +\n '1. step back: compete for ' + str(prize * 2) + ' NIS and start 2 steps from the chaser\\n' +\n '2. stay: compete for ' + str(prize) + ' NIS and start 3 steps from the chaser\\n' +\n '3. step ahead: compete for ' + str(prize // 2) + ' NIS and start 4 steps from the chaser\\n' +\n 'Choose an option (1-3): \\n')\n self.client.send((ASK_RESPONSE + line).encode())\n answer = int(self.client.recv(MAX_INPUT).decode())\n prize *= 2 if answer == 1 else 1/2 if answer == 3 else 1 # update prize (*1 or *1/2 or *2)\n prize = int(prize) # and not float\n self.b = Board(answer) # initialize board\n line = '--One time you can type \\'help\\' and disable 2 answers--\\n'\n self.client.send((NO_RESPONSE + line).encode())\n\n # part 2: let the chaser chase!\n for i in range(12): # 12 questions left\n self.client.send((NO_RESPONSE + str(self.b).encode()) # send board\n q = self.get_question() # get random question from stock\n chaser_answer = self.get_chaser_answer(q) # get chaser answer (75% right)\n line = str(q) + '\\nChoose your answer (1-4): '\n self.client.send((ASK_RESPONSE + line).encode())\n\n # get client answer: int (1/2/3/4) -or- 'help'\n while True: # until client choose answer (1/2/3/4)\n player_answer = self.client.recv(MAX_INPUT).decode() # get answer\n if player_answer == 'help':\n if self.there_is_help:\n self.get_help(q) # send 2 option instead of 4\n self.there_is_help = False # update flag\n line = '\\nChoose your answer (1-4): ' # ask for new answer\n self.client.send((ASK_RESPONSE + line).encode())\n continue\n else: # client already used his help, ask for an answer\n line = 'You already used it!\\n' + 'Choose your answer (1-4): '\n self.client.send((ASK_RESPONSE + line).encode())\n continue\n # else: answer is 1/2/3/4\n break\n\n # update board, check if the game end (win/lose)\n self.update_board(int(player_answer), chaser_answer, q.get_answer())\n win_lose = self.check_win_lose()\n if win_lose == 1: # win\n line = 'Well Done! You Win ' + str(prize) + ' NIS!'\n self.client.send((NO_RESPONSE + line).encode())\n return\n elif win_lose == 2: # lose\n line = 'Oh No! You Lose! 
Maybe Next Time...'\n self.client.send((NO_RESPONSE + line).encode())\n return", "def reply_request(question: str, reply_options = ['y', 'yes', 'n', 'no'], show_reply = False):\n\n reply = None \n while not reply in reply_options:\n reply = input(question).lower()\n else:\n if show_reply:\n print(f'Your choice: {reply}')\n \n if reply in ['yes', 'no']:\n return reply[0]\n else:\n return reply", "def get_help(self, q):\n real_answer = q.get_answer() # first option is the right one\n options = [1, 2, 3, 4] # second option is random out of 4\n options.pop(real_answer-1) # pop right option\n second_option = options[random.randint(0, 2)] # get random wrong option\n for i in range(4): # send the 2 options to the client\n if (i+1) in (real_answer, second_option):\n line = str(i+1) + '. ' + q.get_option(i)\n self.client.send((NO_RESPONSE + line).encode())", "def test__API_other_locks_later(self):\n self.mock_connection.state = MockConnection.CORRECT_NUM_OF_CONFIRMATIONS\n\n # simulate release after 1.5s\n delay = 1.5\n thread = Thread(target=self.mock_connection.simulate_release_from_other_process, args=(1, delay))\n thread.start()\n\n start = time()\n self.mutex.lock()\n\n # simulate request\n self.mock_connection.simulate_request_from_other_process(10) # later than our time\n\n # locked before delay passed=> OK\n self.assertLess(time(), start + delay)\n\n self.mutex.unlock()", "def _assert_message_is_ask_response(\n self,\n message: W24TechreadMessage,\n ask_type: W24AskType,\n ) -> None:\n self._check_request_id(message)\n self._check_message_type(\n message,\n W24TechreadMessageType.ASK,\n ask_type)", "def test_multiple_users_same_survey(self):\n input_data = 'foo bar'\n\n # user A inputs an answer\n self.launchSurvey(self.client_a, 'test', 'textfield')\n self.post(self.client_a, {'name-answer': input_data})\n\n # user B gets taken straight to summary as survey is complete\n self.launchSurvey(self.client_b, 'test', 'textfield')\n last_url_b = self.cache[self.client_b]['last_url']\n self.assertIn('/questionnaire/test/textfield/789/group/0/summary', last_url_b)\n\n # user B manually navigates to answer and can view the value that user A entered\n self.get(self.client_b, '/questionnaire/test/textfield/789/group/0/block')\n last_response_b = self.cache[self.client_b]['last_response']\n self.assertEqual(last_response_b.status_code, 200)\n self.assertIn(input_data, last_response_b.get_data(True))\n\n # user A continues through playback page and submits\n self.post(self.client_a, {})\n self.post(self.client_a, action=None)\n\n # user B tries to submit value\n self.post(self.client_b, {'name-answer': 'bar baz'})\n last_response_b = self.cache[self.client_b]['last_response']\n self.assertEqual(last_response_b.status_code, 401)", "def wait_for_recv_response(self, client):\r\n for i in range(int(client.timeout/TIMEOUT_CHECK)-1):\r\n sleep(TIMEOUT_CHECK)\r\n with client.incoming_data_lock:\r\n if client.waiting_for_response == 0:\r\n return\r\n sleep(TIMEOUT_CHECK)\r\n with client.incoming_data_lock:\r\n if client.waiting_for_response != 0:\r\n client.waiting_for_response = 0\r\n self.send_ipc(client.socket, self.serializer.RESULT_SUCCESS, SERIALIZER_CMD.RECV_EMPTY, [True])", "def respond(self, reply=None, channel=ODKAccess.choice_name(), answers_context={}):\n qset = self.question_set.to_exact\n if self.is_closed():\n return\n next_question = None\n if self.has_started and reply:\n # save reply\n answer_class = Answer.get_class(self.last_question.answer_type)\n answer_class.create(self, 
self.last_question, reply)\n # compute nnext\n next_question = self.last_question.next_question(reply)\n elif self.has_started is False:\n self.last_question = qset.g_first_question\n self.save()\n reply = None # ignore the initial message\n if self.has_started and reply is None:\n return self.last_question.display_text(channel=channel, context=answers_context)\n # now confirm the question is applicable\n if next_question and AnswerAccessDefinition.is_valid(channel, next_question.answer_type) is False:\n # if not get next line question\n next_question = qset.next_inline(self.last_question, channel=channel)\n response_text = None\n if next_question:\n self.last_question = next_question\n response_text = self.last_question.display_text(channel=channel, context=answers_context)\n else:\n self.closure_date = timezone.now()\n self.save()\n return response_text", "def wait_for_reply(timeout=3):\n i = 0\n reply = Networking.get_instance().client.get_server_reply()\n while not reply:\n reply = Networking.get_instance().client.get_server_reply()\n time.sleep(1)\n i += 1\n if i > timeout:\n raise TimeoutError\n return reply" ]
[ "0.7941759", "0.60539633", "0.5878274", "0.57633615", "0.5746768", "0.5607931", "0.5534128", "0.55330086", "0.54986674", "0.54959106", "0.5488982", "0.54822683", "0.5467383", "0.54589397", "0.5447852", "0.5429059", "0.5414662", "0.5373624", "0.53498834", "0.53451174", "0.53329587", "0.53294605", "0.5318572", "0.5308904", "0.53001755", "0.5298899", "0.52894694", "0.5282343", "0.52743053", "0.52572703" ]
0.7496497
1
Returns all possible substrings from string
def __get_all_possible_substrings(base_string):
    substrings = []
    for n in range(1, len(base_string) + 1):
        for i in range(len(base_string) - n + 1):
            substrings.append(base_string[i:i + n])
    return substrings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subs(input_string):\n length = len(input_string)\n return [input_string[i:j+1] for i in xrange(length) for j in xrange(i,length)]", "def get_substrings(string, n):\n substrings = set()\n\n for i in range(len(string) - n + 1):\n substrings.add(string[i:i+n])\n\n return [substring for substring in substrings]", "def substrings(s):\n if s:\n yield from prefixes(s) # 获得当前s的所有prefix\n yield from substrings(s[1:])", "def gen_substring(string: str):\r\n s = []\r\n for i in range(len(string)):\r\n\r\n for j in range(i):\r\n\r\n s.append(string[j:i])\r\n\r\n return s", "def substrings(s):\r\n if s:\r\n yield from prefixes(s) # Get all the substrings that contains the first letter.\r\n yield from substrings(s[1:]) # Slice the first letter of s, then get substrings of s[1:]\r", "def find_all_substrings(s, shortest, longest=0):\n if longest <= shortest:\n longest = shortest + 1\n substrings = [(i, s[i:i+length])\n for length in range(shortest, longest)\n for i in range(len(s) - length + 1)]\n return substrings", "def gen_all_strings(word):\n if not word:\n return [\"\"]\n \n all_strings = []\n for string in gen_all_strings(word[1:]):\n for letter_idx in range(len(string) + 1):\n all_strings.append(string[letter_idx:] + word[0] + string[:letter_idx])\n \n return gen_all_strings(word[1:]) + all_strings", "def findall(self, string: str) -> List[str]:\r\n \r\n string = list(string)\r\n matches = []\r\n\r\n while len(string) > 0:\r\n tokens = self.tokens.copy()\r\n match = []\r\n\r\n while len(tokens) > 0:\r\n token = _advance(tokens)\r\n char = _advance(string)\r\n\r\n if token is None:\r\n break\r\n if char is None and token.type != TokenType.OPTIONAL:\r\n match = []\r\n break\r\n\r\n value = token.match(tokens, string, char)\r\n if value is None:\r\n match = []\r\n break\r\n match.extend(value)\r\n \r\n if len(match) > 0:\r\n matches.append(''.join(match))\r\n\r\n return matches", "def substrings(a, b, n):\n\n # TODO\n return []", "def test_find_all_substrings_01():\n assert (U.find_all_substrings(s, 17, 300) ==\n U.find_all_substrings(s, 17, 27))\n s2 = ''.join([random.choice(s) for i in range(100)])\n assert (U.find_all_substrings(s2, 17, 300) ==\n U.find_all_substrings(s2, 17, len(s2) + 1))", "def test_find_all_substrings_03():\n assert (U.find_all_substrings(s, 17, 17) ==\n U.find_all_substrings(s, 17, 18))\n s2 = ''.join([random.choice(s) for i in range(100)])\n assert (U.find_all_substrings(s2, 17, 17) ==\n U.find_all_substrings(s2, 17, 18))", "def test_find_all_substrings_02():\n assert (U.find_all_substrings(s, 17, 5) ==\n U.find_all_substrings(s, 17, 18))\n s2 = ''.join([random.choice(s) for i in range(100)])\n assert (U.find_all_substrings(s2, 17, 5) ==\n U.find_all_substrings(s2, 17, 18))", "def retrieve_sub(s, n):\n subs = []\n for idx, char in enumerate(s):\n sub = char\n c = 1\n for next_char in s[idx + 1:]:\n if c >= n:\n break\n else:\n sub += next_char\n c += 1\n subs.append(sub)\n return [x for x in subs if len(x) == n]", "def test_get_substrings_all(self):\n\n ans = [s.value() for s in self.sf.get_substrings(0, False)]\n\n expected_values = [(0, 1, 2, 3, 4, 5), (0, 1, 2, 3, 4), (1, 2, 3, 4, 5), (1, 2, 3, 4), \\\n (2, 3, 4, 5), (2, 3, 4), (3, 4, 5), (3, 4), (4, 5), (4,), (5,)]\n\n self.assertEqual(ans, expected_values)", "def substrings(a, b, n):\n def retrieve_sub(s, n):\n \"\"\"Retrieves the substrings of length n from the given string\"\"\"\n subs = []\n for idx, char in enumerate(s):\n sub = char\n c = 1\n for next_char in s[idx + 1:]:\n if c >= n:\n break\n else:\n sub += 
next_char\n c += 1\n subs.append(sub)\n return [x for x in subs if len(x) == n]\n\n # TODO\n subs_A, subs_B = retrieve_sub(a, n), retrieve_sub(b, n)\n\n return list(set([sub for sub in subs_A if sub in subs_B]))", "def gen_all_strings(word):\r\n if len(word) == 0:\r\n return ['']\r\n else:\r\n first = word[0]\r\n rest = gen_all_strings(word[1:])\r\n new = []\r\n for item in rest:\r\n if len(item) > 0:\r\n for pos in range(len(item)):\r\n new.append(item[:pos] + first + item[pos:])\r\n new.append(item + first)\r\n new.append(first)\r\n new.extend(rest)\r\n return new", "def gen_all_strings(word):\n if word == '':\n return ['']\n else:\n first = word[0]\n rest = word[1:]\n rest_strings = gen_all_strings(rest)\n all_words = []\n for string in rest_strings:\n for leter in range(len(string)+1):\n all_words.append(string[0:leter]+first+string[leter:])\n\n return rest_strings + all_words", "def gen_all_strings(word):\n if DEBUG_GAS:\n print \"WORD\", word\n if len(word) < 1:\n if DEBUG_GAS:\n print \"BASE ZERO\"\n print \"len(word)\", len(word)\n print \"word\", word\n return ['']\n if len(word) == 1:\n if DEBUG_GAS:\n print \"BASE ONE\"\n print \"len(word)\", len(word)\n print \"word\", word\n return ['', word]\n else:\n first = word[0]\n rest = word[1:]\n rest_strings = gen_all_strings(rest)\n permutations = []\n if DEBUG_GAS:\n print \"rest_strings\", rest_strings\n print first, rest\n for item in rest_strings:\n if DEBUG_GAS:\n print \"rest_strings item\", item\n for dummy_idx in range(len(item)+1):\n if DEBUG_GAS:\n print \"dummy_idx\", dummy_idx\n print \"item\", item\n permutations.append(str(item[:dummy_idx] + first + item[dummy_idx:]))\n for item in permutations:\n rest_strings.append(item)\n return rest_strings", "def findAllCommonSubstrings(self, string1, string2):\n countingArray = [[0]*len(string1) for i in range(len(string2))]\n ##we set up an array to locate the possible substrings\n candidates = set()\n for row in range(len(countingArray)):\n for column in range(len(countingArray[row])):\n if string2[row] == string1[column]:\n countingArray[row][column] += 1\n if not (row == 0 or column == 0):\n countingArray[row][column] += countingArray[row-1][column-1]\n candidates.add(string2[row-countingArray[row][column]+1:row+1])\n candidates = list(candidates)\n candidates.sort(key=len)\n candidates = list(reversed(candidates))\n return candidates", "def find_all(self, s):\n return self.re.findall(s)", "def findall(string,chars):\n nb = len(chars) \n return [ pos for pos, c in enumerate(string)\n if pos + nb <= len(string) and string[pos:pos + nb] == chars]", "def substrings(a, b, n):\n\n substringsA = set()\n substringsB = set()\n\n #calls helperfunction\n sub(a, n, substringsA)\n sub(b, n, substringsB)\n\n return list(substringsA & substringsB)", "def match_all_cui(s,max_len = 10, Eterm_cui = Eterm_cui):\n if len(s) == 0: \n return []\n sub_label = np.zeros(len(s),dtype = 'int')\n location_term = {}\n i = 0\n while i < len(s):\n for j in range(max_len+1,0,-1):\n temp = ' '.join(s[i:i+j])\n if temp in Eterm_cui:\n sub_label[i:i+j] = 1\n location_term[i] = [Eterm_cui[temp]]\n break#matched maximum string, so break\n i += j\n output = []\n for i in range(len(s)):\n if sub_label[i] == 0:#no match\n output += [s[i]]\n elif i in location_term:\n for cui in location_term[i][: :-1]:\n output += [cui]\n return output", "def find_all( source, substring, start=None, end=None, overlap=False ):\n return [x for x in find_all_iter( source, substring, start, end, overlap )]", "def 
gen_all_strings(word):\n if len(word) == 0:\n return [\"\"]\n elif len(word) == 1:\n return [\"\",word]\n else:\n result_strings = []\n first = word[0]\n rest = word[1:]\n rest_strings = gen_all_strings(rest)\n new_strings = []\n for rest_string in rest_strings:\n for dummy_index in range(len(rest_string)):\n #在首位插入\n if dummy_index == 0:\n new_string = first + rest_string\n new_strings.append(new_string)\n #在中间插入 \n else:\n new_string = rest_string[0:dummy_index] + first + rest_string[dummy_index:]\n new_strings.append(new_string)\n #在末尾插入\n new_strings.append(rest_string + first)\n \n result_strings.extend(rest_strings)\n result_strings.extend(new_strings)\n \n return result_strings", "def substrings(a, b, n):\n # Create empty lists\n c = []\n d = []\n # Append in new lists the length of the selected string based on user input\n # Loop looks at each character while staying within length of specified substring size\n for i in range(len(a) - n + 1):\n c.append(a[i:i+n])\n\n for i in range(len(b) - n + 1):\n d.append(b[i:i+n])\n\n # Lists of substrings are turned into sets to remove duplicates\n setA = set(c)\n setB = set(d)\n\n return setA & setB", "def get_subs(n):\n \n from itertools import product\n return [''.join(sub) for sub in product('CATGN', repeat=n)]", "def findLongestCommonSubstringManyStrings(listOfStrings):", "def read(string):\n\treturn (re.finditer('(?<=\\[)[a-z]+(?=\\])', string), re.finditer('(?<=\\])[a-z]+|[a-z]+(?=\\[)', string))", "def superstring(g):\n substrings = []\n last_overlap = 0\n i = source(g)\n while True:\n substrings.append(g.vertex_label(i)[last_overlap:])\n if g.outdegree(i) > 0:\n j = g.out_edges(i)[0][1]\n last_overlap = g.edge_weight(i, j)\n i = j\n else:\n break\n return \"\".join(substrings)" ]
[ "0.7558087", "0.7136203", "0.68534404", "0.6638399", "0.65625757", "0.65106237", "0.6446263", "0.6429863", "0.641203", "0.6372371", "0.62816674", "0.62804395", "0.6205983", "0.61538947", "0.6139199", "0.60962164", "0.6095457", "0.60795236", "0.6059808", "0.60547954", "0.60365313", "0.6034267", "0.59421045", "0.59237146", "0.5922042", "0.5895598", "0.5872956", "0.5845797", "0.5798891", "0.5773946" ]
0.853459
0
Returns all candidates sorted by leftovers_count parameter
def __get_candidates_best_by_leftovers_count(substrings, base_string):
    candidates = []
    for element in substrings:
        elements_count = base_string.count(element)
        leftovers_count = len(base_string.replace(element, ""))
        candidates.append([element, elements_count, leftovers_count])
    candidates.sort(key=lambda x: x[2])
    return candidates
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_candidates_best_by_elements_count(substrings):\n candidates = []\n best_leftover = substrings[0][2]\n for element in substrings:\n if element[2] == best_leftover:\n candidates.append(element)\n candidates.sort(reverse=True, key=lambda x: x[1])\n return candidates", "def generateCandidates(self):\n\t\tprint(\"Candidate list:\\n\")\n\t\tkeys = list(self.prune_list.keys())\n\t\ttuple_count = len(keys[0])\n\t\tprune_list = {}\n\t\ttup = []\n\t\tfor v in comb(keys, 2):\n\t\t\ta = set(v[0])\n\t\t\tb = set(v[1])\n\t\t\t\n\t\t\t# If there's as many common element in a & b as one less than tuple_count\n\t\t\tif((len(a & b) == (tuple_count - 1)) and (tuple(a | b) not in tup)):\n\t\t\t\ttup.append(tuple(a | b))\n\t\t\t\tprint(tup[-1])\n\t\t\t\t# Update prune list\n\t\t\t\tcount = self.getSupport(tup[-1])\n\t\t\t\tif(count >= self.support):\n\t\t\t\t\tprune_list[tup[-1]] = count\n\t\treturn prune_list", "def sort(self):\r\n self.candidates.sort(key=self.sortFitness)\r\n return", "def prioritize_candidates(lst_cand):\n print(f\"\\nprioritize_candidates(); len = {len(lst_cand)}\")\n if len(lst_cand) > 1:\n for n in range(len(lst_cand)):\n nc = list(lst_cand[n])\n nc.insert(0,0)\n lst_cand[n] = nc\n for cand in lst_cand:\n # some text adds p\n if cand[1].find(\"Okay\") > -1:\n cand[0] += 10\n if cand[1].lower().find(\"serie\") > -1:\n cand[0] += 10\n if cand[1].find(\"__NAM\") > -1:\n cand[0] += 10\n if cand[1].find(\"BIX_\") > -1:\n cand[0] += 10\n if cand[1].find(\"REF_\") > -1:\n cand[0] += 10\n if cand[1].find(\"veracrypt1\") > -1:\n cand[0] += 100\n if cand[1].find(\"veracrypt2\") > -1:\n cand[0] += -10\n # some text cost p\n if any([cand[1].find(f\"-{n}\") > -1 for n in range(9)]):\n cand[0] -= 5\n if cand[1].find(\"DEL\") > -1:\n cand[0] -= 100\n if cand[1].find(\"copy\") > -1:\n cand[0] -= 50\n if cand[1].find(\"output\") > -1:\n cand[0] -= 6\n if cand[1].find(\".part\") > -1:\n cand[0] -= 9\n # deeper path adds p\n cand[0] += cand[1].count(os.sep)\n # If still even, older is better\n lst_top = [cand for cand in sorted(lst_cand, reverse=True)]\n if lst_top[0][0] == lst_top[1][0]: # No winner\n if lst_top[0][2] < lst_top[1][2]: # head is oldest\n lst_top[0][0] += 1\n else:\n lst_top[1][0] += 1\n return lst_top\n else: # Too few to prioritize\n return lst_cand # return unchanged", "def _prune_candidates(self, beam_width=None):\n if beam_width is None:\n beam_width = self.beam_width\n if len(self.candidates) <= beam_width:\n return\n neg_scores = np.array([-cand.logp_total() for cand in self.candidates])\n parted_indices = np.argpartition(neg_scores, beam_width - 1)\n self.candidates = np.array(self.candidates)[parted_indices[:beam_width]].tolist()", "def get_all_candidates(self) -> list:", "def top_n_combined(self, n):\n top = set()\n for feat_set in self.itervalues():\n tuples = sorted(feat_set.items(), reverse=True, key=itemgetter(1))\n best = {feat for feat, _ in tuples[:n]}\n top |= best\n return top", "def prune(self, min_count):\n if not self.sorted:\n self.sort()\n for k, count in enumerate(self.Nx):\n if count < min_count:\n self.truncate(k)\n break", "def min_support_candidates(candidates, min_support): \n\n frequent_candidates = []\n for candidate in candidates:\n if candidate.count >= min_support:\n frequent_candidates.append(candidate)\n return frequent_candidates", "def find_function_candidates(remaining_segments):\n segment_counts = {}\n for segment in remaining_segments:\n for start in range(0, len(segment)):\n for end in range(start + 1, len(route_steps)):\n subsegment 
= tuple(segment[start:end + 1])\n if len(\",\".join(subsegment)) <= 20:\n segment_counts[subsegment] = segment_counts.get(subsegment, 0) + 1\n\n result = []\n for sequence, count in dict(segment_counts).items():\n if count > 1:\n result.append((list(sequence), count))\n result.sort(reverse=True, key=lambda seq: len(seq[0]) * seq[1])\n return result", "def get_candidates(results, num_results=None):\n candidates = OrderedDict()\n\n for result in results.order_by('candidate__race_type', 'candidate__order',\n 'entry_version'):\n candidate = result.candidate\n\n if candidates.get(candidate):\n candidates[candidate].append(result)\n else:\n candidates.update({candidate: [result]})\n\n return [[c] + r[0:num_results] if num_results else r\n for c, r in candidates.items()]", "def keys_sorted_by_frequency(self, cutoff=100):\n return [key for key, _ in self.counter.most_common()][:cutoff]", "def getTopK(counter, tup, k=25):\n adj_list = [] #list of tuples that co occur with tup at least once\n for t in counter.relgram_map[tup]:\n adj_list.append((tup, t)) #add all that appear after tup\n\n\n for i in counter.relgram_map: #find any that appear before tup\n for j in counter.relgram_map[i]:\n if j == tup and i not in adj_list: \n adj_list.append((i, tup))\n\n scores = [(x, SCP(counter, x[0], x[1])) for x in adj_list] \n return sorted(scores, key=lambda x: x[1], reverse=True)", "def topMatches(prefs, person, n=5, similarity=sim_pearson):\n all_matches = [(similarity(prefs, person, other), other) \n for other in prefs.keys()\n if person != other]\n all_matches.sort()\n all_matches.reverse()\n return all_matches[0:n]", "def top_items(self, n=10, filter=None):\n if n > len(self): n = len(self)\n order = np.argsort(self)\n if filter is None:\n indices = order[-1:-n-1:-1]\n return [(self.label(idx), self[idx]) for idx in indices]\n idx = -1\n results = []\n while len(results) != n and idx >= -len(order):\n where = order[idx]\n label = self.label(where)\n if filter(label):\n results.append((label, self[where]))\n idx -= 1\n return results", "def sort_priors(self):\n return", "def candidates(self, min_count, stops=None, tags={\"NN\", \"NNS\", \"NNP\"}):\n if stops is None:\n stops = []\n candidates = set()\n for word_i, word_j in self.bigrams():\n # Filter out bigrams with stopwords.\n if word_i not in stops and word_j not in stops:\n # Make sure bigrams are alphabetical.\n if self.is_lexical(word_i, word_j):\n # Filter out infrequent bigrams.\n if self.bigrams()[word_i, word_j] >= min_count:\n if self.has_relevant_tag((word_i, word_j), tags):\n candidates.add((word_i, word_j))\n return candidates", "def filter_terms_by_cnt(self, min_count):\n filtered_terms = [term for term in self.term2id if self.term_frequent[term] >= min_count]\n # rebuild the term x id map\n self.term2id = {}\n self.id2term = {}\n for term in self.initial_terms:\n self.add(term, count=0)\n for term in filtered_terms:\n self.add(term, count=0)", "def sorted_clusters(self):\n return (c for _, c in sorted((-c.size(), c) for c in self.clusters))", "def best_genomes(self, n):\n def key(g):\n return g.fitness\n\n return sorted(self.most_fit_genomes, key=key, reverse=True)[:n]", "def generate_top20_candidates(cosine_sim):\n top20_indices = cosine_sim[0].argsort()[:-21:-1]\n top20_cosine = [cosine_sim[0][i] for i in top20_indices]\n return top20_indices, top20_cosine", "def selection_sort(unsorted):\n n = len(unsorted)\n _sorted = []\n for _ in range(0, n):\n val = min(unsorted)\n _sorted.append(val)\n unsorted.remove(val)\n del unsorted", "def 
top_matches(prefs, person, n=5, similarity=sim_pearson):\n scores = [(similarity(prefs, person, other), other)\n for other in prefs if other != person]\n\n scores.sort()\n scores.reverse()\n return scores[0:n]", "def non_maximum_suppression_slow(boxes, confs, iou_threshold, top_k):\n idxs = np.argsort(-confs)\n selected = []\n for idx in idxs:\n if np.any(iou(boxes[idx], boxes[selected]) >= iou_threshold):\n continue\n selected.append(idx)\n if len(selected) >= top_k:\n break\n return selected", "def top_k_frequent(top_k, words, list_of_texts):\n dict_top_freq = {}\n for word in words:\n dict_top_freq[word.lower()] = 0\n for string in list_of_texts:\n if word.lower() in string.lower():\n counter = string.lower().count(word.lower())\n dict_top_freq[word.lower()] += counter\n\n list_top_sorted = sorted(dict_top_freq.items(), key=lambda item: item[1], reverse=True)\n print(list_top_sorted)\n\n list_k = []\n for i in list_top_sorted:\n list_k.append(i[0])\n\n return list_k[:top_k]", "def strongest(self):\n pps = collections.Counter()\n for crd in self:\n pps += collections.Counter( {crd.suit:crd.hc} )\n return sorted(pps.items(), reverse=True, key=lambda x:x[1])", "def sort_suggestions(\n suggestions: List[Tuple[Set[str], float]]\n) -> List[Tuple[Set[str], float]]:\n confidence_list = [suggestion[1] for suggestion in suggestions]\n sort_index = sorted(range(len(confidence_list)), key=lambda k: confidence_list[k])\n # Inverse the sort\n sort_index = sort_index[::-1]\n return [suggestions[i] for i in sort_index]", "def get_adopters_for_advertisement(adoption_center, list_of_adopters, n):\n list_of_adopters = sorted(list_of_adopters, key=lambda x:x.get_name())\n ordered_list = sorted(list_of_adopters, key=lambda x:x.get_score(adoption_center),reverse = True)\n return ordered_list[:n]", "def k_most_talkative(self):\n word_counts = self.get_usercounts() # {u1: 3, u2: 4, }\n word_counts_heap = [(-count, username) for username, count in word_counts.items()] # [(-4, username), (-3, username)]\n heapify(word_counts_heap) # [(-4, u2), (-3, u1)]\n counter = 0\n while word_counts_heap or counter < k:\n _, username = heappop(word_counts_heap)\n counter += 1 # 1, 2\n yield username # u2, u1", "def reduce_sort_counts(self, type, word_counts):\n aux = 0\n for count, word in sorted(word_counts, reverse=True):\n if aux < 15: # Controls that we get only the 15 most common keywords\n aux = aux+1\n yield type, (int(count), word)" ]
[ "0.6552511", "0.63088584", "0.59395087", "0.58303636", "0.57138914", "0.5692777", "0.56461126", "0.5624564", "0.5565643", "0.5555702", "0.5529659", "0.5528722", "0.55041605", "0.5468694", "0.54568607", "0.54567075", "0.5369894", "0.5364155", "0.5324101", "0.5313352", "0.53036785", "0.5296125", "0.52946496", "0.5268119", "0.5260614", "0.52593756", "0.5257367", "0.522697", "0.5220888", "0.5214468" ]
0.6589607
0
Returns all candidates sorted by substring_count parameter
def __get_candidates_best_by_elements_count(substrings):
    candidates = []
    best_leftover = substrings[0][2]
    for element in substrings:
        if element[2] == best_leftover:
            candidates.append(element)
    candidates.sort(reverse=True, key=lambda x: x[1])
    return candidates
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_candidates_best_by_leftovers_count(substrings, base_string):\n candidates = []\n for element in substrings:\n elements_count = base_string.count(element)\n leftovers_count = len(base_string.replace(element, \"\"))\n candidates.append([element, elements_count, leftovers_count])\n candidates.sort(key=lambda x: x[2])\n return candidates", "def main(str_text):\n\n frequencies = count_value(str_text)\n sorted_data = sort_dict(frequencies)\n\n return sorted_data", "def fetch_candidates(title, n_words=2):\n\tsplit_marks = [\".\",\":\"]\n\tsearch_string = None\n\tclean_title = filter_stop(cleanup_string(remove_hyphenation(title)))\n\tsearch_string = \" \".join(clean_title.split()[:n_words])\n\treturn search_string, search_ES(search_string)", "def __init__(self, document, k):\n doc = document\n self.n = k\n lst = []\n for l in range(1, self.n+1): #loops through every substring up to n\n for i in range(0, len(doc)-l): #loops through each index up to the length - l because that's where we need to start chopping up\n lst.append(doc[i:i+l]) #adds it to the list\n for i in range(len(doc)-l, len(doc)): #loops through the last indices that need to be chopped\n lst.append(doc[i:]) #adds them to the list as well\n self.ststr = lambda x, y: 0 if x == y else (-1 if x < y else 1) #creates the compare function\n self.sortedList = mysort(lst, self.ststr) #sorts the list and stores it\n '''\n for i in range(0, len(doc) - k):\n lst.append(doc[i:i + k])\n for i in range(len(doc) - k, len(doc)):\n lst.append(doc[i:])\n '''", "def find_substitutes(text):\n if CHAINS == {}:\n generate_food_chains()\n\n candidates = []\n subs = []\n for i in range(len(text)):\n char = text[i]\n if CHAINS.get(char):\n candidates = []\n candidates = CHAINS[char]\n else:\n if candidates != []:\n # choose the most popular option from candidates\n counts = {}\n for candidate in candidates:\n if counts.get(candidate):\n counts[candidate] += 1\n else:\n counts[candidate] = 1\n max_count = 0\n chosen = None\n for candidate, count in counts.iteritems():\n if count > max_count:\n max_count = count\n chosen = candidate\n if chosen:\n subs.append((chosen, i))\n\n candidates = []\n return subs", "def kmp_search(full_str, sub_str):\n n, m = len(full_str), len(sub_str)\n result = []\n pi = get_partial_match(sub_str)\n begin, matched = 0, 0\n while begin <= (n - m):\n if matched < m and full_str[begin + matched] == sub_str[matched]:\n matched += 1\n if matched == m:\n result.append(begin)\n else:\n if matched == 0:\n begin += 1\n else:\n begin += (matched - pi[matched - 1])\n matched = pi[matched - 1]\n return result", "def shorten(strings, n):\n return sorted(strings, key=lambda x: x[n])", "def _sorted_counter(self, input_string: str) \\\n -> List[Tuple[str, int]]:\n _counter = self._word_counter(input_string=input_string)\n\n return sorted(_counter.most_common(), key=lambda x: (-x[1], x[0]))", "def findLongestCommonSubstringManyStrings(listOfStrings):", "def starts_with(self, matchstr, **kwargs):\r\n \r\n valid_kwargs = ['num_results', 'case_sensitive']\r\n validator.validate(kwargs.keys(), valid_kwargs)\r\n\r\n final_list = []\r\n case_sensitive = False\r\n num_results = 0\r\n \r\n if 'num_results' in kwargs:\r\n num_results = int(kwargs['num_results'])\r\n \r\n if len(matchstr) == 0:\r\n if num_results:\r\n return self.__sorted_names[0:num_results]\r\n return self.__sorted_names[:]\r\n\r\n if 'case_sensitive' in kwargs:\r\n if kwargs['case_sensitive']:\r\n case_sensitive = True\r\n\r\n tag_names_that_start_with_char = []\r\n \r\n if 
case_sensitive:\r\n if matchstr[0] not in self.__name_index:\r\n return []\r\n else:\r\n if matchstr[0].lower() not in self.__name_index and matchstr[0].upper() not in self.__name_index:\r\n return []\r\n \r\n if case_sensitive:\r\n idxs = self.__name_index[matchstr[0]]\r\n \r\n if idxs['first'] == idxs['last'] + 1:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']]\r\n else:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']:idxs['last'] + 1]\r\n \r\n else:\r\n if matchstr[0].lower() in self.__name_index:\r\n idxs = self.__name_index[matchstr[0].lower()]\r\n \r\n if idxs['first'] == idxs['last'] + 1:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']]\r\n else:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']:idxs['last'] + 1]\r\n\r\n if matchstr[0].upper() in self.__name_index:\r\n idxs = self.__name_index[matchstr[0].upper()]\r\n \r\n if idxs['first'] == idxs['last'] + 1:\r\n tag_names_that_start_with_char += [self.__sorted_names[idxs['first']]]\r\n else:\r\n tag_names_that_start_with_char += self.__sorted_names[idxs['first']:idxs['last'] + 1]\r\n \r\n if len(matchstr) == 1:\r\n if num_results == 0:\r\n return tag_names_that_start_with_char[:]\r\n else:\r\n return tag_names_that_start_with_char[0:num_results]\r\n \r\n if case_sensitive:\r\n for t in tag_names_that_start_with_char:\r\n if (t.find(matchstr) == 0):\r\n final_list.append(copy(t))\r\n if num_results > 0 and len(final_list) == num_results:\r\n return final_list\r\n else:\r\n for t in tag_names_that_start_with_char:\r\n if (t.lower().find(matchstr.lower()) == 0):\r\n final_list.append(copy(t))\r\n if num_results > 0 and len(final_list) == num_results:\r\n return final_list\r\n\r\n return final_list", "def find_function_candidates(remaining_segments):\n segment_counts = {}\n for segment in remaining_segments:\n for start in range(0, len(segment)):\n for end in range(start + 1, len(route_steps)):\n subsegment = tuple(segment[start:end + 1])\n if len(\",\".join(subsegment)) <= 20:\n segment_counts[subsegment] = segment_counts.get(subsegment, 0) + 1\n\n result = []\n for sequence, count in dict(segment_counts).items():\n if count > 1:\n result.append((list(sequence), count))\n result.sort(reverse=True, key=lambda seq: len(seq[0]) * seq[1])\n return result", "def slowComplete(prefix, list_of_words, top):\n\tfile = open(list_of_words, 'r')\n\tdata = file.readlines()\n\tdata_list = []\n\tfor i in range(len(data)):\n\t\tif i != 0:\n\t\t\tdata_list.append(data[i])\n\tnum_list = []\n\tword_list = []\n\tfor l in data_list:\n\t\tif l != '\\n':\n\t\t\tentry = l.split('\\t')\n\t\t\tnum_list.append(int(entry[0]))\n\t\t\tword_list.append(entry[1][:-1])\n\tcandidate_list = []\n\tfor i in range(len(word_list)):\n\t\tif word_list[i].startswith(prefix):\n\t\t\tcandidate_list.append((word_list[i],num_list[i]))\n\tsorted(candidate_list, key=lambda x: x[1])\n\tfinal_list = candidate_list[0:top]\n\treturn(final_list)", "def top_chars(phrase):\n list_string = phrase.split(\" \")\n phrase_without_spaces = \"\".join(list_string)\n\n letters_count = {}\n letters_count_list = []\n\n for letter in phrase_without_spaces:\n if letter in letters_count:\n letters_count[letter] += 1\n else:\n letters_count[letter] = 1\n\n for letter, count in letters_count.items():\n letters_count_list.append([letter, count])\n\n max_count = 0\n letters_with_highest_count = ['a']\n\n for letter_and_count in letters_count_list:\n if letter_and_count[1] > max_count:\n letters_with_highest_count[:] 
= letter_and_count[0]\n max_count = letter_and_count[1]\n elif letter_and_count[1] == max_count:\n letters_with_highest_count.append(letter_and_count[0])\n\n return sorted(letters_with_highest_count)\n\n\n \n\n\n\n \n\n\n\n return []", "def substring_search(word, collection):\n return [item for item in sorted(collection) if item.startswith(word)]", "def top_three_letters2(string):\n # create a dictionary with letter and frequency\n countdict = defaultdict(int) # gets a dictionary with initial value 0 for every key encountered during loop\n for c in string:\n countdict[c] += 1 # gets dictionary with letter frequency\n top_three = sorted(countdict, key = lambda k: countdict[k], reverse = True)[:3]\n # sorts the dictionary in place, mutates it; based on key, lambda k function, which is countdict[k], values in dictionary, reverses the sorted output\n # to get key-value pairs in descending order\n # uses slicing to get only top three elements from sorted list\n result = [(letter, countdict[letter]) for letter in top_three ] # to get the resullt in desired output format\n print(result)", "def bigSorting(unsorted):\n lookup = defaultdict(lambda: [])\n print(lookup)\n for num_string in unsorted:\n lookup[len(num_string)].append(num_string)\n\n results = []\n lengths = list(lookup.keys())\n lengths.sort()\n for length in lengths:\n x = lookup[length]\n x.sort()\n results = results + x\n print(results)\n return results", "def top_sentences(query, sentences, idf, n):\n ll=[]\n for s in sentences:\n st=sentences[s]\n st=[word.lower() for word in st]\n found_word=0\n total_idf=0\n\n for word in query:\n if word in st:\n total_idf+=idf[word]\n found_word+=1 \n ll.append((total_idf,found_word/len(st),s))\n ll.sort(reverse=True)\n #print(ll)\n ans=[]\n for i in range(n):\n ans.append(ll[i][2])\n #print(\"answer is : \",*ans)\n return ans", "def top_k_frequent(top_k, words, list_of_texts):\n dict_top_freq = {}\n for word in words:\n dict_top_freq[word.lower()] = 0\n for string in list_of_texts:\n if word.lower() in string.lower():\n counter = string.lower().count(word.lower())\n dict_top_freq[word.lower()] += counter\n\n list_top_sorted = sorted(dict_top_freq.items(), key=lambda item: item[1], reverse=True)\n print(list_top_sorted)\n\n list_k = []\n for i in list_top_sorted:\n list_k.append(i[0])\n\n return list_k[:top_k]", "def frequent_words_by_sorting(text, k):\n frequent_patterns = []\n index = []\n count = []\n for i in range(0, len(text) - k + 1):\n pattern = text[i:i + k]\n index[i] = pattern_to_number(pattern)\n count[i] = 1\n sorted_index = sorted(index)\n for i in range(0, len(text) - k + 1):\n if sorted_index[i] == sorted_index[i-1]:\n count[i] = count[i -1] + 1\n max_count = max(count)\n for i in range(0, len(text) - k + 1):\n if count[i] == max_count:\n pattern = number_to_pattern(sorted_index[i], k)\n frequent_patterns.append(pattern)\n return frequent_patterns", "def findAllCommonSubstrings(self, string1, string2):\n countingArray = [[0]*len(string1) for i in range(len(string2))]\n ##we set up an array to locate the possible substrings\n candidates = set()\n for row in range(len(countingArray)):\n for column in range(len(countingArray[row])):\n if string2[row] == string1[column]:\n countingArray[row][column] += 1\n if not (row == 0 or column == 0):\n countingArray[row][column] += countingArray[row-1][column-1]\n candidates.add(string2[row-countingArray[row][column]+1:row+1])\n candidates = list(candidates)\n candidates.sort(key=len)\n candidates = list(reversed(candidates))\n return 
candidates", "def top_three_letters(string):\n print(Counter(string))\n print(Counter(string).most_common(3))", "def occurrences(substring, string, sensitive=True):\n pos = -1\n o = []\n if not sensitive:\n substring = substring.lower()\n string = string.lower()\n while True:\n pos = string.find(substring, pos + 1)\n if pos == -1:\n return o\n else:\n o.append([pos, pos + len(substring)])", "def find_all( source, substring, start=None, end=None, overlap=False ):\n return [x for x in find_all_iter( source, substring, start, end, overlap )]", "def get_comparison_substrings(substring_array, min_num_substrings=round(len(population_dist))):\n for individual in population_dist:\n rand_index = random.randint(0, len(population_dist) - end)\n parent = individual[begin + rand_index:end + rand_index]\n np.append(substring_array, parent)\n\n substring_array = np.unique(substring_array)\n if len(substring_array) < min_num_substrings:\n get_comparison_substrings(substring_array, min_num_substrings)", "def get_substrings(string, n):\n substrings = set()\n\n for i in range(len(string) - n + 1):\n substrings.add(string[i:i+n])\n\n return [substring for substring in substrings]", "def sorted_frequent_words(text: str, k: int) -> Set[str]:\n frequent_patterns = set()\n index = []\n count = []\n for i in range(len(text) - k + 1):\n pattern = text[i:i+k]\n index.append(pattern_to_number(pattern))\n count.append(1)\n\n sorted_index = sorted(index)\n\n for i in range(1, len(text) - k + 1):\n if sorted_index[i] == sorted_index[i-1]:\n count[i] = count[i-1] + 1\n\n max_count = max(count)\n\n for i in range(len(text) - k + 1):\n if count[i] == max_count:\n pattern = number_to_pattern(sorted_index[i], k)\n frequent_patterns.add(pattern)\n\n return frequent_patterns", "def top_chars(phrase):\n phrase = phrase.split()\n letter_counts = {}\n\n # loops through phrase and adds word name to key with the length of the word. If no such key exists, it is created\n for word in phrase:\n for letter in word:\n if letter in letter_counts:\n letter_counts[letter] = letter_counts[letter] + 1\n else:\n letter_counts[letter] = 1\n\n most_used = []\n # loops through each key in the dictionary of usage counts and checks if it has the highest usage count.\n # if it does, it replaces the old elements in the list. 
If it is used as much as the currently most-used letter,\n # it is appended to the list.\n for key in letter_counts:\n if most_used == []:\n most_used.append(key)\n elif letter_counts[key] > letter_counts[most_used[0]]:\n most_used = [key]\n elif letter_counts[key] == letter_counts[most_used[0]]:\n most_used.append(key)\n\n return sorted(most_used)", "def find_all(self, p):\n ln = self.ln\n t = self.t\n occurrences = []\n hints = self.__getHints(p)\n for i in hints:\n # compare rest char in pattern with chars in text after hinted substring\n if t[i + ln:i + len(p)] == p[ln:]:\n occurrences.append(i)\n return occurrences", "def _sorted_counter(self, input_string: str) \\\n -> List[Tuple[str, int]]:\n _counter = self._word_counter(input_string=input_string)\n\n return sorted(_counter.items(), key=lambda x: (-x[1], x[0]))", "def distinct_substrigns(str1, k):\n hash_map = {}\n n = len(str1)\n start = 0 \n end = 1\n hash_map[str1[start]] = 1\n res = [] \n\n while end < n:\n print (\"hash_map is :\", hash_map)\n if str1[end] not in hash_map: \n hash_map[str1[end]] = 1\n else:\n while str1[start]!=str1[end]:\n del hash_map[str1[start]]\n start += 1\n start += 1\n\n if end-start+1 == k:\n temp = str1[start:end+1]\n if temp not in res:\n res.append(temp)\n del hash_map[str1[start]]\n start += 1\n end += 1\n #print(end,start)\n return res" ]
[ "0.7601704", "0.57925", "0.5785811", "0.57252425", "0.56875575", "0.5674585", "0.5650752", "0.56438947", "0.55514383", "0.55237985", "0.55229783", "0.5514106", "0.5510217", "0.55069023", "0.55041426", "0.54983985", "0.54959655", "0.5426884", "0.5422324", "0.5410447", "0.53935677", "0.5372486", "0.53651184", "0.533669", "0.53307694", "0.53197336", "0.53018826", "0.5301066", "0.52902144", "0.52850926" ]
0.743758
1
Get mapping dict of service types
def get_service_mapping():
    # Get all Service types:
    all_service_type = requests.get(base_url + 'services/v2/service_types', headers=headers3).json()
    # Make Dict of service names and ids
    service_name_to_id = {service_type['attributes']['name']:service_type['id'] for service_type in all_service_type['data']}
    return service_name_to_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_type_mapping():\n return {\n Box.SPACE_NAME: Box,\n Dict.SPACE_NAME: Dict,\n Discrete.SPACE_NAME: Discrete\n }", "def list_services(self):\n service_types = list(self.services.keys())\n service_types.sort()\n\n services = {}\n for s_type in service_types:\n if s_type not in services:\n services[s_type] = []\n names = list(self.services[s_type].keys())\n names.sort()\n for name in names:\n services[s_type].append(name)\n return services", "def types(self) -> Dict[str, str]:\n return {name: self.hyperparams[name][0] for name in self.names()}", "def services(self) -> Mapping[str, wrappers.Service]:\n return collections.ChainMap({},\n *[p.services for p in self.protos.values()],\n )", "def _extract_catalog(self, data):\n interface = 'public'\n catalog = data['token']['catalog']\n service_map = {}\n for service in catalog:\n service_endpoint = None\n for endpoint in service['endpoints']:\n if endpoint['interface'] == interface:\n service_endpoint = endpoint['url']\n break\n if service_endpoint:\n service_map[service['type']] = service_endpoint\n LOG.debug('Service catalog: %s' % service_map)\n return service_map", "def _get_services(self, services):\n\n services_info = []\n\n for service in services[1]:\n services_info.append(self._make_dict(service))\n \n return services_info", "def _types(cls):\n return {}", "def services(self) -> dict:\n return self.data[\"services\"]", "def get_services(**options):\r\n return {}", "def get_service_types(self, api_spec: dict, user: Dict[str, Any] = None) -> dict:\n try:\n return {\n \"status\": \"success\",\n \"code\": 200,\n \"data\": {'Secondary services': 'None implemented.'},\n }\n except Exception as exp:\n return ServiceException(CapabilitiesService.name, 500, self._get_user_id(user), str(exp)).to_dict()", "def annotations(cls, types: dict) -> dict:\n return {\n name: Optional[cls.type_map[types[name]]]\n for name in types\n if types[name] in cls.type_map\n }", "def get_services(**options):\n\n return {}", "def calculate_services(source):\r\n\r\n with open(source, 'r') as thrift:\r\n namespaces = dict()\r\n types = defaultdict(set)\r\n for line in thrift:\r\n match = NAMESPACE_PARSER.match(line)\r\n if match:\r\n lang = match.group(1)\r\n namespace = match.group(2)\r\n namespaces[lang] = namespace\r\n else:\r\n match = TYPE_PARSER.match(line)\r\n if match:\r\n typename = match.group(1)\r\n name = match.group(2)\r\n types[typename].add(name)\r\n\r\n return types['service']", "def type_support_map(self) -> Mapping[AllTypes, TypeSupport]:\n raw_config: Dict[str, str] = self.options.get(\"datatypes\", {})\n config = {x: TypeSupport[y.upper()] for x, y in raw_config.items()}\n return fallback(defaultdict(lambda: TypeSupport.UNSUPPORTED), config)", "def map_service_info(port, nmap_store):\n service = port.find(\"service\")\n nmap_store[\"service_name\"] = service.get(\"name\")\n nmap_store[\"service_method\"] = service.get(\"method\")\n nmap_store[\"service_conf\"] = service.get(\"conf\")", "def typeMapping(self):\n statemachines = self.package.getStateMachines()\n classes = {}\n for sm in statemachines:\n workflowId = sm.getCleanName()\n for klass in sm.getClasses():\n # We allow to bound a workflow to a <<stub>>\n if klass.isabstract:\n continue\n elif not self.atgenerator._isContentClass(klass) and \\\n not klass.hasStereoType(self.atgenerator.stub_stereotypes):\n continue\n name = klass.getTaggedValue('portal_type') or \\\n klass.getCleanName()\n classes.setdefault(name, []).append(workflowId)\n\n classNames = classes.keys()\n 
classNames.sort()\n result = []\n for id_ in classNames:\n item = {}\n item['id'] = id_ # portal type\n item['workflowIds'] = classes[id_]\n result.append(item)\n\n # no need to check use_workflow, it's already done by xmiparser.XMIModel.associateClassesToStateMachines,\n # so the sm.getClasses() already returns classes which uses use_workflow tgv.\n # if you uncomment thoses lines, you will have the bound-workflow twice\n #handle the use_workflow tgvs\n #for klass in self.package.getProduct().getClasses(recursive=True):\n # if klass.hasTaggedValue('use_workflow'):\n # result.append(dict(id=klass.getCleanName(),workflowId=klass.getTaggedValue('use_workflow')))\n # remember special case\n remembertypes = []\n self.atgenerator.getRememberTypes(remembertypes, self.package)\n for remembertype in remembertypes:\n existent = False\n for type in result:\n if type['id'] == remembertype['portal_type']:\n existent = True\n if existent:\n continue\n additionaltype = dict()\n additionaltype['id'] = remembertype['portal_type']\n additionaltype['workflowIds'] = [remembertype['workflow']]\n result.append(additionaltype)\n\n # take tgv on state machine itself into account\n for sm in statemachines:\n bindings = sm.getTaggedValue('bindings', '')\n bindings = [b.strip() for b in bindings.split(', ') if b.strip()]\n for binding in bindings:\n item = {}\n item['id'] = binding\n item['workflowIds'] = [sm.getCleanName()]\n result.append(item)\n\n return result", "def _get_simple_type_mapping(simple):\n return _SIMPLE_TYPE_MAPPINGS[simple]", "def config_mapping(self) -> typing.Dict[str, type]:\n return self._subclasses", "def get_consumer_type_map():\n global _CONSUMER_TYPE_MAP\n if _CONSUMER_TYPE_MAP is not None:\n return _CONSUMER_TYPE_MAP\n tbl = db.get_table('consumer_types')\n sel = sa.select([tbl.c.id, tbl.c.code])\n sess = db.get_session()\n _CONSUMER_TYPE_MAP = {r[1]: r[0] for r in sess.execute(sel)}\n return _CONSUMER_TYPE_MAP", "def get_mapping_from_type(self, doc_dict):\n #the meta here is defined for when the case index + type is created for the FIRST time\n #subsequent data added to it will be added automatically, but date_detection is necessary\n # to be false to prevent indexes from not being created due to the way we store dates\n #all are strings EXCEPT the core case properties which we need to explicitly define below.\n #that way date sort and ranges will work with canonical date formats for queries.\n return {\n self.get_type_string(doc_dict): self.default_mapping\n }", "def type_classes(self) -> Dict[str, int]:\n return {\n \"bg\": 0,\n \"neutrophil\": 1,\n \"epithelial\": 2,\n \"lymphocyte\": 3,\n \"plasma\": 4,\n \"eosinophil\": 5,\n \"connective\": 6,\n }", "def _get_service_type(service):\n\n return service.split(':')[3]", "def get_service_type_for_service_name(self, service_name):\n from ranger_performance_tool import perf_globals\n service_type_mapping = perf_globals.CONFIG_READER.get_config_value(\"secondary\", \"service_type_mapping\")\n if service_name not in service_type_mapping.keys():\n raise Exception(f\"Unknown service name:{service_name}.\"\n f\"Add it to service_type_mapping in secondary config file\")\n return service_type_mapping[service_name]", "def _map_segments(self, type_: Any) -> Dict:\n mapping: Dict = {}\n for seg in self.segments:\n if seg.name and isinstance(seg, type_):\n if mapping.get(seg.name) and mapping.get(seg.name) != seg:\n raise ValueError(f\"Duplicate segment: {seg.name}\")\n mapping[seg.name] = seg\n return mapping", "def 
get_services(self):\n\t\t#Entrega el dict sin miramientos\n\t\treturn self._services", "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(ServiceStatus, dict):\n for key, value in self.items():\n result[key] = value\n\n return result", "def factory_type_dict():\n return {'filter' : filters.generate_filter,\n 'global_options' : global_options.generate_global_options,\n 'input_device' : input_devices.generate_input_device,\n 'input_stream' : input_streams.generate_input_stream,\n 'output_device' : output_devices.generate_output_device,\n 'output_stream' : output_streams.generate_output_stream}", "def registered_services(self):\n registry = {}\n for conv in self.conversations():\n services = json.loads(conv.get_remote('registered-services', '{}'))\n for name, data in services.items():\n registry.setdefault(name, []).append(data)\n return registry", "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(VirtualService, dict):\n for key, value in self.items():\n result[key] = value\n\n return result", "def collect_type_names(types: Iterable[UnresolvedType]) -> TypeMap:\n return StrictDict({\n type_.name: type_ for type_ in visit_types(types)\n if not isinstance(type_, DeferredType)\n })" ]
[ "0.7148652", "0.69539016", "0.677271", "0.65662247", "0.63725734", "0.62752503", "0.626193", "0.623429", "0.62054604", "0.61644703", "0.61461675", "0.6043616", "0.60345", "0.6029466", "0.6017146", "0.5986938", "0.5957674", "0.59273297", "0.59249467", "0.5899342", "0.5892947", "0.5885992", "0.58773685", "0.5847196", "0.584157", "0.583916", "0.58277553", "0.57757354", "0.5752781", "0.5747489" ]
0.7781699
0
Get mapping dict of events
def get_event_mapping():
    # Get all events:
    all_events = requests.get(base_url + 'check-ins/v2/events', headers=headers3).json()
    # Make Dict of event names and ids
    event_to_id = {event['attributes']['name']:event['id'] for event in all_events['data']}
    return event_to_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def event_map(self) -> dict:\n return self._event_map", "def get_event_actions_mapping(self):\n return None", "def parse_events(events_dict):\n return events_dict['events']", "def create_events():\n events = {}\n events[\"Workers_can_proceed\"] = mp.Event()\n for i in range(NUM_WORKERS):\n events[i] = mp.Event()\n return events", "def _mapping(self):\n return [('message.received', self.on_new_message), \\\n ('message.read.prevent', self.can_not_read)]", "def event_to_dict(event):\n\n # TODO Add things here\n d = {}\n d['type'] = event.name\n try:\n d['location'] = event.location\n except AttributeError:\n raise Exception(\"All events should have locations\")\n\n try:\n d['second'] = event.second\n except AttributeError:\n d['second'] = 'Event has no timestamp'\n\n try:\n d['unit'] = {\n 'id': event.unit.id,\n 'name': event.unit.name\n }\n except AttributeError:\n pass\n\n try:\n d['player'] = event.control_pid\n except AttributeError:\n pass\n\n try:\n d['unit_id'] = event.unit_id\n except AttributeError:\n pass\n\n return d", "def event_info_to_dic(self):\n event_info = {\n 'event_category': self.event_category,\n 'event_location': self.event_location,\n 'event_age_range': self.event_age_range,\n 'event_date': self.event_date,\n 'event_time': self.event_time,\n 'event_description': self.event_description,\n 'event_created_by': self.event_created_by,\n 'event_joined_by': self.event_joined_by,\n 'event_likes': self.event_likes\n }\n return event_info", "def event_message(iden: int, event: Any) -> dict[str, Any]:\n return {\"id\": iden, \"type\": \"event\", \"event\": event}", "def _map_event_to_dict(_include, sql_event):\n event = {\n attr: getattr(sql_event, attr)\n for attr in sql_event.keys()\n }\n\n for unused_field in Events.UNUSED_FIELDS:\n if unused_field in event:\n del event[unused_field]\n\n if event['type'] == 'cloudify_event':\n del event['logger']\n del event['level']\n elif event['type'] == 'cloudify_log':\n del event['event_type']\n\n # Keep only keys passed in the _include request argument\n # TBD: Do the projection at the database level\n if _include is not None:\n event = {k: v for k, v in event.items() if k in _include}\n\n return event", "def _event_to_dict(event):\n return {\n 'event_type': event.event_type,\n 'src_path': str(Path(event.src_path).relative_to(Path.cwd())),\n 'is_directory': event.is_directory\n }", "async def _get_events_from_external_cache(\n self, events: Iterable[str], update_metrics: bool = True\n ) -> Dict[str, EventCacheEntry]:\n event_map = {}\n\n for event_id in events:\n ret = await self._get_event_cache.get_external(\n (event_id,), None, update_metrics=update_metrics\n )\n if ret:\n event_map[event_id] = ret\n\n return event_map", "def get_events(self):\n raise NotImplementedError", "def get_events(self):\n raise NotImplementedError", "def convert_event_for_output(event):\n\n\tconverted_event = {}\t\n\tfor fieldname in FIELDNAMES:\n\t\tif event.has_key(fieldname):\n\t\t\tconverted_event[fieldname] = event[fieldname]\n\n\treturn converted_event", "def event_dicts(self):\n events = []\n # We're assuming that the table has alternating rows that\n # containg (date, event title) possibly followed by (<empty>,\n # event details).\n selector = '#ae-billing-logs-table > tbody > tr'\n for (date_elt, event_elt) in self.doc.cssselect(selector):\n if date_elt.text is not None:\n events.append({\n # <td>EVENT DATE</td>\n 'date': date_elt.text.strip(),\n # <td><span id=\"...\">EVENT TITLE</span></td>\n 'title': event_elt.findtext('span').strip()\n })\n 
else:\n # An empty first column indicates details for the\n # preceeding event.\n assert len(events) > 0, len(events)\n last_event = events[-1]\n if last_event['title'].startswith('Usage Report '):\n last_event['details'] = self._usage_report_dict(event_elt)\n return events", "async def get_already_persisted_events(\n self, events: Iterable[EventBase]\n ) -> Dict[str, str]:\n\n mapping = {}\n txn_id_to_event: Dict[Tuple[str, str, str, str], str] = {}\n\n for event in events:\n device_id = getattr(event.internal_metadata, \"device_id\", None)\n txn_id = getattr(event.internal_metadata, \"txn_id\", None)\n\n if device_id and txn_id:\n # Check if this is a duplicate of an event in the given events.\n existing = txn_id_to_event.get(\n (event.room_id, event.sender, device_id, txn_id)\n )\n if existing:\n mapping[event.event_id] = existing\n continue\n\n # Check if this is a duplicate of an event we've already\n # persisted.\n existing = await self.get_event_id_from_transaction_id_and_device_id(\n event.room_id, event.sender, device_id, txn_id\n )\n if existing:\n mapping[event.event_id] = existing\n txn_id_to_event[\n (event.room_id, event.sender, device_id, txn_id)\n ] = existing\n else:\n txn_id_to_event[\n (event.room_id, event.sender, device_id, txn_id)\n ] = event.event_id\n\n return mapping", "def events(self) -> Dict[EventCall, Set[Node]]:\n return self._events", "async def load_events(\n self,\n event_config: dict,\n guild: Guild\n ) -> Dict[int, BaseEvent]:\n events = {}\n for message_id_str, event_dict in event_config.items():\n events[int(message_id_str)] = await self.load_event(\n event_dict,\n guild\n )\n\n return events", "def get_events_dict(self, query_string, **kwargs):\n\n return json.loads(self.get_events_json(query_string, **kwargs))", "def get_events(self):\n return self.events", "def pre_serialize_event_list(events):\n events_with_locations = assign_locations_to_events(events)\n return list(map(event_to_dict, events_with_locations))", "def GetEventCountsSinceLastCall(self):\n event_map = {}\n self.lock.acquire()\n for event in self.events:\n event_map[event.name] = event.count\n event.count = 0\n self.lock.release()\n return event_map", "def _get_events_from_local_cache(\n self, events: Iterable[str], update_metrics: bool = True\n ) -> Dict[str, EventCacheEntry]:\n event_map = {}\n\n for event_id in events:\n # First check if it's in the event cache\n ret = self._get_event_cache.get_local(\n (event_id,), None, update_metrics=update_metrics\n )\n if ret:\n event_map[event_id] = ret\n continue\n\n # Otherwise check if we still have the event in memory.\n event = self._event_ref.get(event_id)\n if event:\n # Reconstruct an event cache entry\n\n cache_entry = EventCacheEntry(\n event=event,\n # We don't cache weakrefs to redacted events, so we know\n # this is None.\n redacted_event=None,\n )\n event_map[event_id] = cache_entry\n\n # We add the entry back into the cache as we want to keep\n # recently queried events in the cache.\n self._get_event_cache.set_local((event_id,), cache_entry)\n\n return event_map", "def respond_to_events(self):\n event_response = MessageEventHandler(self.state, self.meta_data, self.message_data).handle_events(events=self.events)\n\n if event_response == []:\n return {}\n return event_response[0]", "def group_events(events):\n\n groups = {\n e.group: {\"events\": [], \"handler\": e.handler} for e in events\n }\n\n # allocate the events into their groups\n for e in events:\n groups[e.group][\"events\"].append(e)\n\n return groups", "def 
address_mapped_event(self, event):\r\n output = [event.event_name, event.from_addr, event.to_addr, \r\n time.asctime(event.when)]\r\n plog(\"DEBUG\", \" \".join(output))", "def getSlotMap(self):\n slotMap = dict()\n for entry in self.slots:\n slotMap[entry] = self.__getattribute__(\"on_\" + entry)\n return slotMap", "async def save_events(\n event_dict: Union[Dict[int, BaseEvent], Dict[int, OngoingEvent]]\n ) -> dict:\n events = {}\n for message_id, event in event_dict.items():\n events[str(message_id)] = await event.save_to_dict()\n\n return events", "def annotations_to_events(\n *,\n raw_paths: List[PathLike]\n) -> Dict[str, int]:\n event_names: List[str] = []\n for raw_fname in raw_paths:\n raw = mne.io.read_raw_fif(raw_fname)\n _, event_id = mne.events_from_annotations(raw=raw)\n for event_name in event_id.keys():\n if event_name not in event_names:\n event_names.append(event_name)\n\n event_names = sorted(event_names)\n event_name_to_code_map = {\n name: code\n for code, name in enumerate(event_names, start=1)\n }\n\n return event_name_to_code_map", "def get_events_batch() -> PayloadDictList:\n ..." ]
[ "0.80639976", "0.6997986", "0.68349934", "0.66129386", "0.6574959", "0.64685273", "0.64532745", "0.64484024", "0.64381576", "0.6359469", "0.63124835", "0.63021064", "0.63021064", "0.62903833", "0.6268375", "0.6241141", "0.6240614", "0.62224525", "0.6191082", "0.6187281", "0.616369", "0.61565304", "0.614629", "0.6063454", "0.60449976", "0.6018646", "0.6015448", "0.600851", "0.5995662", "0.5958089" ]
0.72078437
1
Get location id from event id
def get_location_id(event_id, loc_name):
    # Get all locations:
    all_locations = requests.get(base_url + f'check-ins/v2/events/{event_id}/locations', headers=headers3).json()
    # Make Dict of location names and ids
    location_to_id = {location['attributes']['name']:location['id'] for location in all_locations['data']}
    # Get adult attendees location id
    location_id = location_to_id.get(loc_name, '')
    return location_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def id(self) -> str:\n return self._event.get('id')", "def get_event_by_id(event_id):\n db = get_db()\n return db.execute((\n 'SELECT id, name, start_time, end_time, location '\n 'FROM event WHERE id=?'),\n (event_id,)).fetchone()", "def get_location_by_id(self, location_id):", "def event_id(self):\n return self._event_id", "def identifier(self):\n return location_id(self.__dict__)", "def getId(self):\n return _libsbml.Event_getId(self)", "def GetEventIdentifier(self):\n return self._event_identifier", "def getUniqueID(event):\n\tmatch = reUniqueID.search(event)\n\tif match:\n\t\tresult = match.group(0).split(\" \")[1].rstrip(\"\\r\")\n\t\treturn result\n\telse:\n\t\treturn None", "def locationid(self, irc, msg, args, locationName):\n try:\n locationID = self._get_locationID(locationName)\n irc.reply(locationID, prefixNick=False)\n except:\n irc.error('Unknown location')", "def get_coordinates_from_id(tracking_id=None, event_id=None):\n if tracking_id:\n json_document = mongo.read_single_document(collection='TRACKING', filter={'_id':ObjectId(tracking_id)}, projection={'coordinates':True})\n if not json_document:\n json_document = mongo.read_single_document(collection='TRACKING', filter={'_id': ObjectId(tracking_id)}, projection={'coordinates':True})\n return {'latitude': str(json_document['coordinates'][1]), 'longitude': str(json_document['coordinates'][0])}\n elif event_id:\n json_document = mongo.read_single_document(collection='EVENT', filter={'_id':ObjectId(event_id)}, projection={'coordinates':True})\n if json_document:\n return {'latitude': str(json_document['coordinates'][1]), 'longitude': str(json_document['coordinates'][0])}\n else:\n return None", "def _get_absolute(self, event):\n return Quartz.CGEventGetLocation(event)", "def get_event_eid(eid):\n return EventModel.query.get_or_404(eid)", "def test_event_id(self):\n result = self.test_client.event_id\n\n assert result == \"2130389\"", "def location_id(location_dict):\n d = location_dict\n iden = \"%s|%s|%s|%s|%s|%s\" % \\\n (d['line1'], d['line2'], d['line3'], d['city'], d['state'],\n d['postal_code'])\n if d['bbox_width'] and d['bbox_height']:\n iden += \"|%r|%r\" % (d['bbox_width'], d['bbox_height'])\n\n return iden.lower()", "def get_device_id_from_event(event):\n return event.message.annotations[\"iothub-connection-device-id\".encode()].decode()", "def _get_locator_id(self):\n return self.__locator_id", "def get_translated_id(id, lang, event=True):", "def _get_absolute(event):\n return event.locationInWindow()", "def _get_location_id(self, location):\r\n loc_svc = self.client['Location_Datacenter']\r\n datacenters = loc_svc.getDatacenters(mask='mask[longName,id,name]')\r\n for datacenter in datacenters:\r\n if datacenter['name'] == location:\r\n location = datacenter['id']\r\n return location\r\n raise ValueError('Invalid datacenter name specified.')", "def GetEventDataIdentifier(self):\n return self._event_data_identifier", "def locationElement(self, elementId):\n cmdId = self.executeCommand(Command.GET_ELEMENT_LOCATION, {'id': elementId})\n return cmdId", "def getPlugEventId(self, pid, ename):\n for event in self._events.values():\n if event.name == ename and event.pid == pid: \n return event.ID\n return None", "def get_location_id(self):\n return self.cleaned_data['location_id']", "async def eventid(self, event,event_code,sec=8):\n code = '[0-9]{4:%s}'% int(sec)\n return f\"{event[:3].upper()}{event_code}-{StringGenerator(str(code)).render(unique=True)}\"", "def event(self):\n return self.get('callback_id')", "def 
getID():", "def _get_next_event_id():\n VenueCrawler._event_id += 1\n return VenueCrawler._event_id", "def object_id(self) -> str:\n return self._event.get('object_id')", "def _e_to_id(self, e):\n return (e.attrib['href']\n [(e.attrib['href']\n .rfind('/id')+3):]\n .replace('?mt=2', ''))", "def get_location_from_id(id):\n tree = ET.parse('./devset_topics.xml')\n root = tree.getroot()\n for item in root.findall('./topic'):\n if id == item[0].text:\n return item[1].text" ]
[ "0.70892364", "0.68705934", "0.68487877", "0.671715", "0.66815215", "0.65408355", "0.64933246", "0.64408857", "0.6257067", "0.6248813", "0.62349504", "0.6233402", "0.6211422", "0.6202817", "0.616206", "0.61552113", "0.6094231", "0.60508996", "0.5985182", "0.5935654", "0.59334844", "0.5899282", "0.589469", "0.5875861", "0.5874031", "0.583469", "0.5818912", "0.5792607", "0.57682955", "0.5768254" ]
0.7565096
0
Get a future service plan and future service plans times
def get_future_plans(service_id, indx):
    # Get service type latest plan and also include plan times
    service_plans = requests.get(base_url + f'services/v2/service_types/{service_id}/plans?filter=future&order=sort_date&per_page=1&include=plan_times&offset={indx}', headers=headers3).json()
    # Get plan ids/plan times
    upcoming_plan_id = service_plans['data'][0]['id']
    plan_times = service_plans['included']
    # Get service time ids and times, times are in UTC time
    service_time_ids_to_time = {time['id']:time['attributes']['starts_at'] for time in plan_times if time['attributes']['time_type'] == 'service'}
    return upcoming_plan_id, service_time_ids_to_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plans():", "def _get_service_plan(self, service_name, service_plan_name):\n self._assert_space()\n key = ' / '.join([service_name, service_plan_name])\n if key in self._service_plan:\n return self._service_plan[key]\n self._get_service(service_name)\n service_plan_url = self._service['entity']['service_plans_url']\n res = self._cc.request(service_plan_url).get()\n for plan in res.resources:\n if service_plan_name == plan['entity']['name']:\n self._service_plan[key] = plan\n break\n return self._service_plan[key]", "def plan_get(request):\n company = auth_api_key(request)\n plan = get_and_check_plan(request, company)\n return plan", "def scheduled_plan(self):\n return self._scheduled_plan", "def service_times(self):\r\n service_times = []\r\n for task in self.__tasks.values():\r\n if task.complete():\r\n x = task.service_time()\r\n service_times.append(task.service_time())\r\n return service_times", "def _get_convergence_plans(project, service_names):\n ret = {}\n plans = project._get_convergence_plans(\n project.get_services(service_names), ConvergenceStrategy.changed\n )\n for cont in plans:\n (action, container) = plans[cont]\n if action == \"create\":\n ret[cont] = \"Creating container\"\n elif action == \"recreate\":\n ret[cont] = \"Re-creating container\"\n elif action == \"start\":\n ret[cont] = \"Starting container\"\n elif action == \"noop\":\n ret[cont] = \"Container is up to date\"\n return ret", "def service_times(self):\r\n service_times = [task.service_time() for task in self.__tasks.values() if task.complete(True)]\r\n return service_times", "def start_and_service_times(self):\r\n return [(x.scheduler_launch_time, x.service_time()) for x in self.__tasks.values()\r\n if x.complete()]", "def get_scheduled_infos(self):\n\n raise NotImplementedError", "def get_plan(self):\n\t\tresponse = self.client.get(self._endpoint + \"/plan\")\n\t\tplan = response.json['plans']\n\t\tplan = list(plan.items())[0][1]\n\t\treturn Plan(plan['plan_id'],data=plan)", "def pending_time(self):\n now = datetime.datetime.utcnow().replace(tzinfo=utc)\n timediff = now - self.time_requested\n return timediff", "def human_print_plan(plan: object):\n print(f'Name: {plan[\"name\"]}')\n print(f'Description: {plan[\"description\"] if \"description\" in plan else \"N/A\"}')\n print(f'Services: {BackupServicePlan.service_list_to_str(plan[\"services\"])}')\n print(f'Default: {(plan[\"default\"] if \"deafult\" in plan else False)!s}')\n\n # If the are no tasks return\n if not plan[\"tasks\"]:\n return\n\n print()\n print('Tasks:')\n task_name_pad = 5\n schedule_pad = 10\n for task in plan['tasks']:\n if len(task['name']) > task_name_pad:\n task_name_pad = len(task['name'])\n\n task['schedule_str'] = BackupServicePlan.format_schedule(task['schedule'])\n if len(task['schedule_str']) > schedule_pad:\n schedule_pad = len(task['schedule_str'])\n\n task_name_pad += 1\n schedule_pad += 1\n\n header = f'{\"Name\":<{task_name_pad}} | {\"Schedule\":<{schedule_pad}} | Options'\n print(header)\n print('-' * (len(header) + 5))\n\n for task in plan['tasks']:\n options = BackupServicePlan.format_options(task)\n print(f'{task[\"name\"]:<{task_name_pad}} | {task[\"schedule_str\"]:<{schedule_pad}} | {options}')", "def get_volunteers(service_id, upcoming_plan_id, location_id, event_id, event_period_id, service_time_ids_to_time, event_time_to_id):\r\n # Get all Team members\r\n team_members = requests.get(base_url + f'services/v2/service_types/{service_id}/plans/{upcoming_plan_id}/team_members?per_page=100', 
headers=headers3).json()\r\n\r\n volunteers = []\r\n # Loop through team members\r\n for person in team_members[\"data\"]:\r\n if person[\"attributes\"][\"status\"] == \"C\" or person[\"attributes\"][\"status\"] == \"U\":\r\n # get volunteer time ids\r\n time_ids=person['relationships']['times']['data']\r\n # convert time_id into times\r\n times = set(service_time_ids_to_time.get(time_id['id']) for time_id in time_ids)\r\n # convert times into event_ids\r\n check_time_ids = set(event_time_to_id.get(time) for time in times)\r\n \r\n # remove any None entry\r\n check_time_ids.discard(None)\r\n\r\n for check_t_id in check_time_ids:\r\n temp_dict = {\r\n 'check-in-kind':'Volunteer',\r\n# \"name\": person[\"attributes\"][\"name\"], # you can also add the persons name but it doesn't seem to be compulsory\r\n 'bulk_check_in[check_ins_attributes][][account_center_person_id]': person['relationships']['person']['data']['id'],\r\n 'bulk_check_in[check_ins_attributes][][check_in_times_attributes][][location_id]': location_id,\r\n 'bulk_check_in[check_ins_attributes][][check_in_times_attributes][][event_time_id]': check_t_id,\r\n 'bulk_check_in[check_ins_attributes][][check_in_times_attributes][][kind]': \"Volunteer\",\r\n 'bulk_check_in[check_ins_attributes][][event_id]': event_id,\r\n 'bulk_check_in[check_ins_attributes][][event_period_id]': event_period_id\r\n }\r\n volunteers.append(temp_dict)\r\n return volunteers", "def get_schedule():\n startdate = '02/28/2020'\n enddate = '04/01/2020'\n return statsapi.schedule(start_date=startdate, end_date=enddate, team=134)", "def get(cls, plan_id):\n return cls().requests.get(f\"plan/{plan_id}\")", "def list(cls):\n return cls().requests.get('plan')", "def planwatch(self, hours=12):\n post = {'mytime': str(hours)}\n response = self._get_page('planwatch.php', post=post)\n soup = bs4.BeautifulSoup(response.text, 'html5lib')\n results = soup.find('ul', {'id': 'new_plan_list'})\n new_plans = results.findAll('div', {'class': 'newplan'})\n resultlist = []\n for div in new_plans:\n user = div.find('a', {'class': 'planlove'}).contents[0]\n time = div.find('span').contents[0]\n time = parse_plans_date(time, tz_name=self.server_tz)\n resultlist.append((user, time))\n return resultlist", "def service_time(self):\r\n #print self.node_monitor_address, self.completion_time - self.node_monitor_launch_time\r\n return (self.completion_time - self.node_monitor_launch_time)", "def evaluate_time(plan):\n # Check if the plan is not empty\n if plan.joint_trajectory.points:\n plan_dict = message_converter.convert_ros_message_to_dictionary(plan) # type: dict\n duration_time = plan_dict['joint_trajectory']['points'][-1]['time_from_start'] # type: dict\n duration = int(duration_time['secs']) + int(duration_time['nsecs'])*10**(-9) # type: float\n rospy.loginfo('Estimated time of planning: {} s'.format(duration))\n return duration\n else:\n rospy.logwarn('The plan is empty')\n duration = 0.0\n return duration", "def response_time(self, incorporate_skew=True):\r\n if self.__arrival_time == INVALID_TIME:\r\n self.__logger.debug(\"Request %s missing arrival time\" % self.__id)\r\n return -1\r\n completion_time = self.__arrival_time\r\n for task_id, task in self.__tasks.items():\r\n if task.completion_time == INVALID_TIME:\r\n self.__logger.debug((\"Task %s in request %s missing completion \"\r\n \"time\") % (task_id, self.__id))\r\n return INVALID_TIME_DELTA\r\n task_completion_time = task.completion_time\r\n if incorporate_skew:\r\n task_completion_time -= task.clock_skew\r\n # Here we 
compare two event times: the completion time, as\r\n # observed the the node monitor, minus the clock skew; and the\r\n # job arrival time, as observed by the scheduler. If the\r\n # adjusted completion time is before the arrival time, we know\r\n # we've made an error in calculating the clock skew.clock_skew\r\n if task_completion_time < self.__arrival_time:\r\n self.__logger.warn((\"Task %s in request %s has estimated \"\r\n \"completion time before arrival time, \"\r\n \"indicating inaccuracy in clock skew \"\r\n \"computation.\") % (task_id, self.__id))\r\n else:\r\n \tif task.scheduler_launch_time > task.node_monitor_launch_time:\r\n\t\t\t\t\t\t\t\tself.__logger.warn(\"Task %s suggests clock skew: \" % task_id)\r\n completion_time = max(completion_time, task_completion_time)\r\n\r\n if (completion_time - self.__arrival_time) > 2000:\r\n pass\r\n \"\"\"\r\n print \"TRUE: %s\" % (completion_time - self.__arrival_time)\r\n print self.network_delays()\r\n print self.service_times()\r\n print self.probing_time()\r\n print \"EST: %s\" % (max(self.service_times()) + max(self.network_delays()) + self.probing_time())\r\n \"\"\"\r\n return completion_time - self.__arrival_time", "def get_next_allowed_request(self):\n\t\t#**********************************************************************\n\t\t# Only 4 requests a minute are allowed on VT for public API keys.\n\t\t#**********************************************************************\n\t\tdelay = VirusTotalSource.DelayBetweenRequest # Every X seconds\n\t\t#**********************************************************************\n\t\t# Provides the time of the next authorized request\n\t\t# 2 seconds are added to add some leeway.\n\t\t#**********************************************************************\n\t\treturn datetime.now() + timedelta(seconds=delay+2)", "def test_get_all_rate_plans(self):\n pass", "def resource_cost(self, resource_id, start_time=-1, eft=-1, cost_only=True):\r\n tasks_in_resource = [t for t in self.tasksOfResource[resource_id] if not t.task.dummy_task]\r\n if not tasks_in_resource:\r\n if eft == -1:\r\n return 0 if cost_only else (0, 0, 0)\r\n else:\r\n return math.ceil((eft - start_time) / self.timeslot[resource_id]) * self.price[resource_id]\r\n if start_time != -1:\r\n task_start_time = min(tasks_in_resource[0].EST, start_time)\r\n else:\r\n task_start_time = tasks_in_resource[0].EST\r\n task_finish_time = max(tasks_in_resource[-1].EFT, eft)\r\n reservation = task_finish_time - task_start_time\r\n cost = math.ceil(reservation / self.timeslot[resource_id]) * self.price[resource_id]\r\n\r\n\r\n timeslot = self.timeslot[resource_id]\r\n startof = [x.EST for x in tasks_in_resource]\r\n endof = [x.EFT for x in tasks_in_resource]\r\n\r\n if start_time != -1:\r\n startof.append(start_time)\r\n endof.append(eft)\r\n startof.sort()\r\n endof.sort()\r\n\r\n timeslot_start = min(startof)\r\n last_finish_time = max(endof)\r\n current_task_id = 0\r\n\r\n rent_periods = []\r\n\r\n while timeslot_start < last_finish_time:\r\n task_len = endof[current_task_id] - timeslot_start\r\n time_slot_finish = endof[current_task_id] + (timeslot - (task_len % timeslot)) % timeslot\r\n current_task_id += 1\r\n if current_task_id >= len(startof):\r\n rent_periods.append((timeslot_start, time_slot_finish))\r\n break\r\n if startof[current_task_id] <= time_slot_finish:\r\n pass\r\n else:\r\n rent_periods.append((timeslot_start, time_slot_finish))\r\n timeslot_start = startof[current_task_id]\r\n\r\n sum = 0\r\n for rp in rent_periods:\r\n sum += 
(rp[1] - rp[0])\r\n cost = sum / timeslot * self.price[resource_id]\r\n\r\n if cost_only:\r\n return cost\r\n else:\r\n return cost, min(startof), (max(endof))", "def new_flight_plan(self):\n r = requests.post(self.base_url + f'/users/{self.username}/flight-plans')\n return r.text", "def get_plan(self, plan_type, **kwargs):\n if \"with_constant_power\" == plan_type:\n return Plan_With_Constant_Power(starting_time = kwargs[\"starting_time\"],\n route = self.route,\n historical_weather = self.historical_weather,\n weather_forecast = self.weather_forecast,\n **kwargs)", "def get_current_period(self):\n if not self.next_billing:\n return None\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n start = self.next_billing - relativedelta(months=self.frequency)\n end = self.next_billing\n return start, end", "def svn_info_t_schedule_get(svn_info_t_self): # real signature unknown; restored from __doc__\n pass", "def get_service_times(self, per_node_service_times):\r\n for task in self.__tasks.values():\r\n if task.node_monitor_address not in per_node_service_times:\r\n per_node_service_times[task.node_monitor_address] = []\r\n per_node_service_times[task.node_monitor_address].append(task.service_time())", "async def server_time(self):\n uri = \"/fapi/v1/time\"\n success, error = await self.request(\"GET\", uri)\n return success, error", "def getSubmitTime():" ]
[ "0.61407393", "0.6079679", "0.58951896", "0.5810577", "0.57075036", "0.5627977", "0.5582494", "0.54750943", "0.54399997", "0.53883713", "0.5374862", "0.53398436", "0.5339664", "0.5310947", "0.52710015", "0.5268247", "0.5259629", "0.5187249", "0.5183262", "0.5164232", "0.51580507", "0.51537365", "0.5140088", "0.5122685", "0.5109538", "0.5081088", "0.50736415", "0.5070302", "0.506526", "0.5047192" ]
0.70387816
0
The length of a generated token
def token_length(self):
    return 32
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def length(self):\n return len(self.tokens)", "def Length(self) -> int:", "def Length(self) -> int:", "def __len__(self):\n return len(self._tokens)", "def parse_len_token(self, token, context):\n match = Ftype_character.len_token_re.match(token)\n if match is not None:\n return match.group(1)\n else:\n raise ParseSyntaxError(\"length type-param-value\", token=token, context=context)", "def _get_length(self):\n return self._length", "def __len__(self):\n return len(self.token2id)", "def length(self):\n pass", "def get_length(self):\n return self._length", "def get_length(self):\n return self._length", "def getLength( self, sbjct_token ):\n if not self.mIsLoaded: self.__loadIndex()\n return self.mIndex[sbjct_token][2]", "def length(self) -> int:\n pass", "def length(self):\n return self._info.length # pylint: disable=E1101", "def signature_length(self):", "def get_length(self):\n\n return self.length", "def length(self):\n\t\treturn self.n", "def length(self):\n ...", "def getLength(self):\n return self.n", "def generateNumericTokenOfLength(length: int) -> str:\n return \"\".join([r.choice(string.digits) for _ in range(length)])", "def get_length(self):\n\n return self._length", "def getLength(self):\n return self.length", "def length(self) -> 'int':\n return self._info.len", "def total_length():\n return", "def getLen(self):\n return self.len", "def test_generate_token(self):\n door_pass = DoorPassFactory.build()\n token = door_pass.generate_token()\n self.assertIsInstance(token, str)\n self.assertEqual(len(token), 40)", "def length(self):\n return self.length", "def length(self):\n return self.__length", "def length(self):\n return self.__length", "def length(self) -> int:\r\n\r\n return self.__length", "def __len__(self):\n return self._length # pylint: disable = E1101" ]
[ "0.8090891", "0.72398555", "0.72398555", "0.72175467", "0.7071612", "0.69847536", "0.6974451", "0.6898202", "0.68943214", "0.68943214", "0.689186", "0.68851024", "0.6876461", "0.68388605", "0.6801382", "0.67994404", "0.6776136", "0.6754608", "0.6752794", "0.6728152", "0.6708379", "0.67042553", "0.6701191", "0.66843826", "0.6676823", "0.6670453", "0.66634536", "0.66634536", "0.66430014", "0.6638352" ]
0.8225514
0
The type of access token we are using
def token_type(self):
    return 'Bearer'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def token_type(self) -> str:\n return self._token_type", "def token_type(self) -> str:\n return self._token_type", "def get_auth_token(self, request: Request, type=\"Bearer\") -> str:\n if \"Authorization\" not in request.headers:\n raise AuthenticationRequiredException\n try:\n auth_type, auth_code = request.headers[\"Authorization\"].split(' ')\n assert auth_type == type\n except Exception:\n raise AuthenticationSchemeInvalidException\n return auth_code", "def return_authorization_string(self):\n\n return \"{0} {1}\".format(self.tokenType, self.accessToken)", "def get_access_token(self, request) -> str or Exception:\n pass", "def access_token(self):\n return self.access_token_str", "def get_token(self, access_token):\n if access_token:\n return access_token\n elif self.default_access_token:\n return self.default_access_token\n else:\n return ''", "def oauth_type():\n if \"OAUTH_TYPE\" in current_app.config:\n return current_app.config[\"OAUTH_TYPE\"].lower()\n else:\n return None", "def auth_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_type\")", "def auth_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_type\")", "def auth_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_type\")", "def auth_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_type\")", "def auth_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_type\")", "def auth_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_type\")", "def auth_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_type\")", "def auth_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_type\")", "def get_access_token(self) -> Optional[Text]:\n return self.access_token", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def token_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_type\")", "def token_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_type\")" ]
[ "0.72079873", "0.72079873", "0.71791047", "0.70688397", "0.7021812", "0.69597125", "0.6859751", "0.6741833", "0.6722053", "0.6722053", "0.6722053", "0.6722053", "0.6722053", "0.6722053", "0.6722053", "0.6722053", "0.6716401", "0.6669683", "0.6669683", "0.6669683", "0.6669683", "0.6669683", "0.6669683", "0.6669683", "0.6669683", "0.6669683", "0.6669683", "0.6669683", "0.66125023", "0.66125023" ]
0.7798354
0
How long until the access token expires; defaults to 1 hour
def token_expires_in(self): return 60 * 60
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh_token(self):\n now = timezone.now()\n limit = now - timedelta(days=20)\n # TODO: use expires_in from response data?\n print(self.token_refresh_date)\n print(limit)\n if self.token_refresh_date < limit:\n url = '{}refresh_access_token'.format(conf.INSTAGRAM_API)\n params = {\n 'grant_type': 'ig_refresh_token',\n 'access_token': self.token\n }\n response = requests.get(url, params=params)\n data = response.json()\n else:\n print('no need to get a fresch token yet')\n return\n if response.status_code == 200 and data:\n self.token = data.get('access_token')\n self.token_refresh_date = now\n self.token_ok = True\n self.save()\n elif settings.DEBUG:\n self.token_ok = False\n self.save()\n print('could not refresh token')\n return", "def expires_in(self):\n if not self._initialized:\n return None\n\n now = datetime.now()\n delta = now - self.request_time\n ellapsed = delta.total_seconds()\n\n expires = self.token['expires_in'] - ellapsed\n\n return expires if expires > 0 else 0", "def _get_api_token_exp_from_config():\n return datetime.timedelta(\n **dict(zip(('hours', 'minutes', 'seconds'), map(int, config['app']['auth']['api_token_exp'].split(':'))))\n )", "def expire(self):\n logging.debug(\"Expiring token as wanted...\")\n self.expiration = datetime.now() - timedelta(seconds=(10))", "def refresh_token(self):\n token = json.loads(get_metadata(\n 'instance/service-accounts/%s/token' % self.service_account,\n ))\n seconds = token['expires_in'] - 60\n self._expiration_time = (\n datetime.datetime.now() + datetime.timedelta(seconds=seconds)\n )\n self._token = token['access_token']", "def _set_token_expiration_time(self, expires_in):\n self.token_expiration_time = dt.datetime.utcnow() + dt.timedelta(0, expires_in) # timedelta(days, seconds)", "def test_expired_access_token_time(self):\n\n expired = datetime.datetime.now(pytz.utc) - datetime.timedelta(\n minutes=6)\n\n # Store the old TZ info, if it exists.\n old_tz = None\n if 'TZ' in os.environ:\n old_tz = os.environ['TZ']\n\n # Convert now into every possible timezone out there :)\n for name in self.tested_timezones:\n\n # Override the 'default timezone' for the current runtime.\n os.environ['TZ'] = name\n\n # Create a token.\n with base.HybridSessionManager():\n authorization_code = auth_api.authorization_code_save({\n 'user_id': 2,\n 'state': 'test_state',\n 'code': 'test_valid_code',\n 'expires_in': 300,\n 'created_at': expired\n })\n\n content_type = 'application/x-www-form-urlencoded'\n # POST with content: application/x-www-form-urlencoded\n response = self.app.post('/v1/openid/token',\n params={\n 'code': authorization_code.code,\n 'grant_type': 'authorization_code'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a valid call.\n self.assertEqual(401, response.status_code)\n\n # Reset the timezone.\n if old_tz:\n os.environ['TZ'] = old_tz\n else:\n del os.environ['TZ']", "def access_token(self):\n if self.has_expired():\n self.update()\n\n return self.token['access_token']", "def default_expiration_delta():\n return timezone.now() + const.EXPIRY_TOKEN_DELTA", "def new_token_expiry_date():\n\treturn timezone.now() + datetime.timedelta(days=TOKEN_VALID_DATE)", "def set_expiration(self):\n # This is an arbitrary decision setting the expiration time\n # to the current date + expires_in - 10 seconds\n self.expiration = datetime.now() + \\\n timedelta(seconds=(self.expires_in - 10))\n logging.debug('Token expiration set to %s' % self.expiration)", "def build_access_token_expired():\n return 
do_build_access_token(tenant_id='intility_tenant_id', expired=True)", "def is_expired(self):\n if self.access_token is None:\n logging.debug('Access token not found')\n return True\n else:\n return (self.expiration <= datetime.now())", "def get_access_token(self,\n client_id=settings.OPENHUMANS_CLIENT_ID,\n client_secret=settings.OPENHUMANS_CLIENT_SECRET):\n # Also refresh if nearly expired (less than 60s remaining).\n delta = timedelta(seconds=60)\n if arrow.get(self.token_expires) - delta < arrow.now():\n self._refresh_tokens(client_id=client_id,\n client_secret=client_secret)\n return self.access_token", "def test_expires_soon(self):\n now = timezone.now()\n window = SparkSettings().RENEW_TOKEN_WINDOW\n cur = self.factory.build(access_token='good',\n expires_at=now + timedelta(seconds=window*2))\n exp = self.factory.build(access_token='expired',\n expires_at=now + timedelta(seconds=window/2))\n self.assertFalse(cur.expires_soon())\n self.assertTrue(exp.expires_soon())", "def refresh_token():\n current_user = get_jwt_identity()\n if current_user is None:\n return abort(401)\n response = deepcopy(AUTH_OKAY)\n response['payload']['access_token'] = create_access_token(\n identity=current_user,\n expires_delta=EXPIRY_DURATION\n )\n response['payload']['expires_in'] = EXPIRY_DURATION.seconds\n response['payload']['not_before'] = int(time() + EXPIRY_DURATION.seconds)\n return jsonify(response['payload']), response['status_code']", "def get_token_expiry(public=True):\n if public:\n return now() + EXPIRE_DELTA_PUBLIC\n else:\n return now() + EXPIRE_DELTA", "def get_token_expiry(public=True):\n if public:\n return now() + EXPIRE_DELTA_PUBLIC\n else:\n return now() + EXPIRE_DELTA", "def expires_in(self):\n # TODO: Use Arrow?\n expiration = datetime.datetime.fromtimestamp(self.expiration)\n now = datetime.datetime.now()\n\n return expiration - now", "def default_nonce_duration():\n return now() + timedelta(hours=4)", "def get_password_reset_expiry():\n now = timezone.now()\n return now + datetime.timedelta(days=1)", "def get_access(access_token='',expire_time=0):\r\n #Get a new access token if it expires or is five minutes away from exp#iration\r\n if (expire_time==0) or (len(access_token)==0) or (time.time()-expire_time>=-300):\r\n\r\n #API needed to authorize account with refresh token\r\n auth_url = 'https://api.tdameritrade.com/v1/oauth2/token'\r\n\r\n #Data needed for token\r\n data = {'grant_type':'refresh_token',\r\n 'refresh_token':TDAuth_Info.refresh_token,\r\n 'client_id':TDAuth_Info.client_id}\r\n\r\n #Post the data to get the token\r\n auth_reply_json = requests.post(url=auth_url,data=data)\r\n auth_reply=auth_reply_json.json()\r\n\r\n #Now use the token to get account information\r\n access_token = auth_reply['access_token']\r\n expire_time=time.time()+auth_reply['expires_in']\r\n \r\n return (access_token,expire_time)", "def expireDate(self)->datetime:\n return self.firstAccessDate + timedelta(seconds=self.expirePeriodInSeconds)", "def token_valid_check(start_time):\n #calculate the time elapsed since token was last refreshed\n elapsed_time = time.time() - start_time\n #take action if token is expired\n if elapsed_time > 3540:\n return False\n return True", "def get_access_control_max_age(self):\n return self.access_control_max_age", "def checkTokenTime(func):\n def wrapper(*args, **kwargs):\n config = s.query(Config).first()\n time_left = config.LastAuthDateUTC + (config.ExpiredToken * 1000) - int(datetime.datetime.now().timestamp() * 1000)\n if time_left < 10: # give 10 seconds 
grace\n Issuer.updateToken(Issuer)\n return func(*args, **kwargs)\n return wrapper", "def refresh_token_time_remaining(self, refresh_amount):\n current_time = (datetime.today()).timestamp() + refresh_amount\n return self._token_expire_time - current_time", "def _refresh_token(self):\n token_url = self._base_url + '/api/oauth2/token'\n params = {\n 'grant_type': 'client_credentials',\n 'client_id': self._client_id,\n 'client_secret': self._client_secret\n }\n headers = {'accept': 'application/json'}\n response = requests.post(token_url,proxies = self._proxy,params= params,headers = headers)\n logging.debug(response.text)\n parsed = response.json()\n self._access_token = parsed['access_token']\n self._refresh_token = parsed['refresh_token']\n expires_in = parsed['expires_in']\n ## Keep a buffer of 120 seconds to refresh token before expiry\n self._expires_at = datetime.now() + timedelta(seconds=(expires_in - 120))\n\n logging.debug('access_token %s expires at %s', self._access_token, self._expires_at)\n\n return", "def get_access_token(self):\n\n token_work = time.time() < self.expires\n\n if token_work:\n # No need update token\n return self.access_token\n\n data = {\n 'client_id': self.client_id,\n 'grant_type': 'implicit'\n }\n\n response = requests.post('https://api.moltin.com/oauth/access_token', data=data)\n raise_response_errors(response)\n\n response_json = response.json()\n\n self.access_token = response_json['access_token']\n self.expires = response_json['expires']\n\n logger.debug('elasticpathh access token was updated')\n\n return self.access_token", "def _get_token(self):\n if self._access_token is None or self._is_expired():\n self._refresh_token()\n return self._access_token" ]
[ "0.7069979", "0.68552774", "0.6793371", "0.6770057", "0.6759449", "0.6665038", "0.66025156", "0.6565258", "0.6553815", "0.6531839", "0.6502029", "0.6496467", "0.6471388", "0.6422638", "0.64030856", "0.6399166", "0.6317765", "0.6317765", "0.6289671", "0.62332964", "0.6198888", "0.6177637", "0.6163305", "0.61493987", "0.6143929", "0.61296856", "0.6121668", "0.611744", "0.610993", "0.6108003" ]
0.82809424
0
Generate a random authorization code.
def generate_authorization_code(self): return gen_api_key(length=self.token_length)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_code(self):\n code = ''.join(\n random.choices(string.ascii_lowercase + string.digits, k=5))\n self.code = '{}{}'.format(self.user.id, code)", "def create_secret_code():\n characters = string.ascii_uppercase + string.digits\n size = 6\n return ''.join(random.choice(characters) for _ in range(size))", "def generate_code(_=None):\n chars = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n\n rand = random.SystemRandom()\n return \"\".join(rand.choice(chars) for x in range(30))", "def gen_code():\n return ''.join([random.choice(string.ascii_uppercase + string.digits) for _ in range(10)])", "def generate_code(self):\n seeds = \"1234567890\"\n random_str = []\n for i in range(4):\n random_str.append(choice(seeds))\n\n return \"\".join(random_str)", "def regenerate(self):\n self.secret_code = random.randint(self.min, self.max)", "def generateAuthToken():\r\n alnum = ''.join(c for c in map(chr, range(256)) if c.isalnum())\r\n return ''.join(random.choice(alnum) for _ in range(32))", "def generate_new_code():\n code = ''.join(random.choice(string.digits) for i in range(6))\n return code", "def generate_verification_code():\n new_ver_code = str(random.randint(1000000, 9999999))\n return new_ver_code", "def generate_token():\n return uuid4()", "def generate_verification_code(self, size=10, chars=string.digits):\n return \"\".join(random.choice(chars) for _ in range(size))", "def generate_token(self):\n token = randint(100000000000000000, 999999999999999999)\n return str(token)", "def generate_random_coupon_code(suffix=None):\n code = fake.password(length=8, special_chars=False, digits=True, upper_case=True, lower_case=False)\n if suffix:\n code += suffix\n\n return code", "def __generate_random_string():\n return uuid4().hex[:6].upper()", "def _oauth_nonce_generate(self):\n\t\traw_data = random.getrandbits(32 * 8)\n\t\traw_str = ''\n\t\tfor i in range(32):\n\t\t\tnew_part = raw_data % 256\n\t\t\traw_data /= 256\n\t\t\traw_str += chr(new_part)\n\t\n\t\tencoded = base64.b64encode(raw_str) \n\t\treturn encoded.rstrip('=').replace('+', 'A').replace('/', 'B')", "def generate_password():\n return urlsafe_b64encode(urandom(32)).decode('utf-8')", "def generate_token():\n chars = ('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n rand = random.SystemRandom()\n random_string = ''.join(rand.choice(chars) for _ in range(40))\n return hmac.new(\n config.SECRET_KEY,\n random_string,\n hashlib.sha256\n ).hexdigest()", "def randkey():\n return binascii.b2a_hex(os.urandom(15))", "def generate_key():\r\n\t\treturn ''.join(random.SystemRandom().choice(string.ascii_lowercase) for _ in range(123))", "def test_random_code_generator(self):\n # Produces similar to '8FHGNH'\n code = random_code_generator()\n self.assertEquals(len(code), 6)\n code_2 = random_code_generator()\n if code == code_2:\n self.assertEquals(False)\n # Produces similar to 'CFB-U8X-9KE-TY8':\n code_3 = random_code_generator(12, 4, '-')\n self.assertEquals(len(code_3), 15)\n self.assertEquals(len(code_3.replace('-', '')), 12)\n code_4 = random_code_generator(100, banned_chars='X')\n self.assertEquals(code_4.find('X'), -1)", "def create_random_code(chars=AVAIABLE_CHARS):\n return \"\".join(\n [choice(chars) for _ in range(SIZE)]\n )", "async def code(self) -> str:\n if self.shared_secret:\n return generate_one_time_code(self.shared_secret)\n print(\"Please enter a Steam guard code\")\n code = await utils.ainput(\">>> \")\n return code.strip()", "def gen_secret() -> str:\n r = random.randrange(0, 255) # 
INSECURE, just for demo\n r = hex(r)[2:]\n if len(r) == 1:\n return f'0{r}'\n return r", "def _generateSecretKey():\n return ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(20))", "def _random_id():\n return binascii.hexlify(os.urandom(4)).decode()", "def generate_random_password(self):\r\n self.symbols = self.__set_symbol_dict() # set new symbol subset dict\r\n self.i = randrange(len(self.symbols)) # set new dict key pointer\r\n return \"\".join(self.__get_random_symbol() for _ in range(self.pw_len))", "def generate_secret_code():\n length = game_config['secret_rules']['length']\n secret_choices = game_config['secret_rules']['choices']\n secret = []\n\n for i in range(length):\n secret.append(secret_choices[random.randint(0, length - 1)])\n\n return secret", "def passwordGen() :\n\treturn __randomString(12)", "def create_challenge():\n\treturn os.urandom(12)", "def _get_random_number_code(self):\r\n return \"str(random.randint(0, 1e9))\"" ]
[ "0.75588864", "0.74285436", "0.74177575", "0.7316583", "0.7104523", "0.71042585", "0.70178926", "0.69639707", "0.6927824", "0.67356384", "0.6693077", "0.6623431", "0.6611711", "0.65914744", "0.65532476", "0.6545281", "0.6539805", "0.6536347", "0.6520258", "0.6507461", "0.65049386", "0.6501489", "0.64376175", "0.64221495", "0.641103", "0.6406132", "0.6401754", "0.63908356", "0.63903075", "0.63879526" ]
0.7840646
0
Generate a random access token.
def generate_access_token(self): return gen_api_key(length=self.token_length)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_new_token(self):\n self.access_token = random_auth_key()", "def generate_token():\n chars = ('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n rand = random.SystemRandom()\n random_string = ''.join(rand.choice(chars) for _ in range(40))\n return hmac.new(\n config.SECRET_KEY,\n random_string,\n hashlib.sha256\n ).hexdigest()", "def generate_token(self):\n token = randint(100000000000000000, 999999999999999999)\n return str(token)", "def generate_token(self):\n self.__get_auth_token_and_secret()\n return self.get_token()", "def generate_token():\n return uuid4()", "def generate_token(email):\n access_token = create_access_token(email)\n return access_token", "def build_access_token():\n return do_build_access_token(tenant_id='intility_tenant_id')", "def generate_refresh_token(self):\n return gen_api_key(length=self.token_length)", "def regenerate_authentication_token(self):\n new_token = os.urandom(self.TOKEN_LENGTH).encode('hex')\n expires = int(time.time()) + Auth.SESSION_DURATION\n self.write(self.token_filename, ('%s %d' % (new_token, expires)))\n return new_token", "def generateToken():\n token_length = random.randint(MIN_TOKEN_LEN, MAX_TOKEN_LEN)\n token = ''.join(random.choice(POSS_TOKEN_CHARS) for _ in range(token_length))\n return token", "def generateAuthToken():\r\n alnum = ''.join(c for c in map(chr, range(256)) if c.isalnum())\r\n return ''.join(random.choice(alnum) for _ in range(32))", "def auth_token_generate(identity_param_val, expires_delta=False):\n access_token = ''\n try:\n if expires_delta is not False:\n expires_delta = timedelta(minutes=expires_delta)\n access_token = create_access_token(identity=identity_param_val, expires_delta=expires_delta)\n except Exception as e:\n print(e)\n\n return access_token", "def _generate_token(self):\n return sha1(\"%s#%s\" % (time(),\n self.app.cfg['sessions/secret'])).hexdigest()", "def get_access_token(self, minutes: int = 1440) -> str:\n return crypt.encode_token({\n 'uuid': str(self.pk),\n 'space_id': str(self.space_id),\n }, timedelta(minutes=minutes))", "def generate_access_token_cache_key(token):\n\n return 'wopi_access_token_' + str(token)", "def generate(self):\n return self.rpc.call(MsfRpcMethod.AuthTokenGenerate)['token']", "def generate_new_token(uid):\n random_token = uuid.uuid4()\n token = TokenAuth(user_id=uid, token=random_token)\n token.save()\n return random_token", "def create_access_token(identity: Union[str,int], type_token: str, fresh: Optional[bool] = False) -> bytes:\n return AuthJWT.create_token(\n identity=identity,\n type_token=type_token,\n fresh=fresh,\n exp_time=timedelta(minutes=AuthJWT._ACCESS_TOKEN_EXPIRES)\n )", "def __generate_session_token(self):\n\n return get_random_string(length=32)", "def generate_key():\n return get_token_generator().generate_token()", "def generateToken():\n length = random.randint(8, 32)\n rdmtoken = ''.join(random.choice(string.printable) for i in range(length))\n return f'{rdmtoken}'", "def access_token(self):\n access = import_string(api_settings.ACCESS_TOKEN_CLASS)()\n\n # Use instantiation time of refresh token as relative timestamp for\n # access token \"exp\" claim. 
This ensures that both a refresh and\n # access token expire relative to the same time if they are created as\n # a pair.\n access.set_exp(from_time=self.current_time)\n\n no_copy = self.no_copy_claims\n for claim, value in self.payload.items():\n if claim in no_copy:\n continue\n access[claim] = value\n\n access.set_issuer()\n access.set_audience()\n\n # in order to encode token with new claims\n return str(access)", "def build_access_token_normal_user():\n return do_build_access_token(tenant_id='intility_tenant_id', admin=False)", "def build_evil_access_token():\n return do_build_access_token(tenant_id='intility_tenant_id', evil=True)", "def access_token(self):\n if self.has_expired():\n self.update()\n\n return self.token['access_token']", "def generateAuthToken(self):\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=0, minutes=30),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n return jwt.encode(payload, current_app.config['SECRET_KEY'], algorithm='HS256').decode()\n except Exception as error:\n print(error)\n return error", "def make_token():\n return secrets.token_urlsafe(36)", "def generate_state_token():\n chars = (ascii_letters + digits)\n rand = SystemRandom()\n random_string = ''.join(rand.choice(chars) for _ in range(len(chars)))\n return hmac.new(\n config.SECRET_KEY.encode('utf-8'),\n random_string.encode('utf-8'),\n hashlib.sha256\n ).hexdigest()", "def build_access_token_guest():\n return do_build_access_token(tenant_id='guest_tenant_id')", "def build_access_token_expired():\n return do_build_access_token(tenant_id='intility_tenant_id', expired=True)" ]
[ "0.82569", "0.745192", "0.7336344", "0.7269252", "0.72377497", "0.71828794", "0.6926926", "0.6832841", "0.68226", "0.681461", "0.68085945", "0.6764717", "0.6703978", "0.6695814", "0.66630363", "0.66609293", "0.66137046", "0.6607952", "0.65929705", "0.65886843", "0.6582464", "0.6566401", "0.6517091", "0.64756083", "0.6457345", "0.644685", "0.6418113", "0.6387152", "0.63731223", "0.63706" ]
0.80414665
1
Generate a random refresh token.
def generate_refresh_token(self): return gen_api_key(length=self.token_length)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_new_token(self):\n self.access_token = random_auth_key()", "def refresh_token():\n json_request = request.json\n refresh_token = json_request.get('refresh_token')\n if not refresh_token:\n return msg.errors.bad_request(\n 'You should provide refresh token for this call')\n refresh_token_obj = RefreshToken.valid_token(refresh_token)\n if not refresh_token_obj:\n return msg.errors.unauthorized('Provided refresh token is not valid')\n access_token = generate_token(refresh_token_obj.user_id)\n return msg.success(\n message='New access token generated',\n access_token=access_token)", "def generate_token():\n return uuid4()", "def generate_token(self):\n token = randint(100000000000000000, 999999999999999999)\n return str(token)", "def generate_token():\n chars = ('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n rand = random.SystemRandom()\n random_string = ''.join(rand.choice(chars) for _ in range(40))\n return hmac.new(\n config.SECRET_KEY,\n random_string,\n hashlib.sha256\n ).hexdigest()", "def regenerate_authentication_token(self):\n new_token = os.urandom(self.TOKEN_LENGTH).encode('hex')\n expires = int(time.time()) + Auth.SESSION_DURATION\n self.write(self.token_filename, ('%s %d' % (new_token, expires)))\n return new_token", "def generate_new_token(self, refresh_token: str | None = None) -> None:\n if not refresh_token:\n user_name = self.cred['identifier']\n password = self.cred['password']\n data = f'username={user_name}&password={password}'\n url = '/rest/api/auth/userpass'\n\n else:\n data = f'token={self.refresh_token}'\n url = '/rest/api/auth/token'\n\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n\n response = self._http_request(\n 'POST', url, data=data, headers=headers)\n\n new_token = response.get('accessToken')\n refresh_token = response.get('refreshToken')\n\n if new_token:\n self._headers['NetWitness-Token'] = new_token\n self.refresh_token = refresh_token\n demisto.setIntegrationContext({'token': new_token, 'refresh_token': refresh_token})\n\n else:\n raise DemistoException(\"Error in authentication process- couldn't generate a token\")", "def generate_token(self):\n self.__get_auth_token_and_secret()\n return self.get_token()", "def _generate_token(self):\n return sha1(\"%s#%s\" % (time(),\n self.app.cfg['sessions/secret'])).hexdigest()", "def generate_new_token(uid):\n random_token = uuid.uuid4()\n token = TokenAuth(user_id=uid, token=random_token)\n token.save()\n return random_token", "def __generate_session_token(self):\n\n return get_random_string(length=32)", "def create_refresh_token(identity: Union[str,int], type_token: str) -> bytes:\n return AuthJWT.create_token(\n identity=identity,\n type_token=type_token,\n exp_time=timedelta(days=AuthJWT._REFRESH_TOKEN_EXPIRES)\n )", "def generateToken():\n length = random.randint(8, 32)\n rdmtoken = ''.join(random.choice(string.printable) for i in range(length))\n return f'{rdmtoken}'", "def generate(self):\n return self.rpc.call(MsfRpcMethod.AuthTokenGenerate)['token']", "def regenerate(self):\n self.secret_code = random.randint(self.min, self.max)", "def generateToken():\n token_length = random.randint(MIN_TOKEN_LEN, MAX_TOKEN_LEN)\n token = ''.join(random.choice(POSS_TOKEN_CHARS) for _ in range(token_length))\n return token", "def refresh_token():\n return current_app.library_registry.admin_controller.refresh_token()", "def refresh_token(refresh_token):\r\n \r\n return None", "def generate_new_token(cls):\n token = proquint.generate()\n\n # Try 100 times to generate 
a unique token.\n TRIALS = 100\n for __ in range(TRIALS):\n token = proquint.generate()\n if SecretToken.exists(token):\n continue\n break\n # after TRIALS attempts and we didn't get a unique token,\n # just raise an error.\n # See https://stackoverflow.com/a/9980160 on what for-else loop does.\n else:\n raise ValueError(\"Cannot generate new token\")\n\n # We found a unique token! Save it\n return token", "def refresh():\n current_user = get_jwt_identity()\n ret = {\n 'access_token': create_access_token(identity=current_user)\n }\n return jsonify(ret), 200", "def RefreshToken():\n params = {}\n params['client_id'] = Constants.USER['CLIENT_ID']\n params['client_secret'] = Constants.USER['CLIENT_SECRET']\n params['refresh_token'] = Constants.AUTH['REFRESH']\n params['grant_type'] = 'refresh_token'\n\n data = urllib.urlencode(params)\n\n headers = {\n 'User-Agent': 'LogoCert Client',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Accept': 'text/html, */*',\n }\n\n request_url = Constants.OAUTH_TOKEN\n\n request = urllib2.Request(request_url, data, headers)\n res = urllib2.urlopen(request)\n response = res.read()\n return json.loads(response)", "def refresh_token():\n current_user = get_jwt_identity()\n if current_user is None:\n return abort(401)\n response = deepcopy(AUTH_OKAY)\n response['payload']['access_token'] = create_access_token(\n identity=current_user,\n expires_delta=EXPIRY_DURATION\n )\n response['payload']['expires_in'] = EXPIRY_DURATION.seconds\n response['payload']['not_before'] = int(time() + EXPIRY_DURATION.seconds)\n return jsonify(response['payload']), response['status_code']", "def generate_access_token(self):\n return gen_api_key(length=self.token_length)", "def generateAuthToken():\r\n alnum = ''.join(c for c in map(chr, range(256)) if c.isalnum())\r\n return ''.join(random.choice(alnum) for _ in range(32))", "def gen_csrf_secret():\n\treturn Random.new().read(csrf_secret_len)", "async def generate_new_refesh_key(payload: dict = Depends(get_jwt_payload)):\n if payload[\"type\"] != \"refresh\":\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"You gave the access key, but we need the refresh key\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n\n # <- Your token revocation code should be here!\n\n access_token_data = jwt_claims.copy()\n access_token_data[\"sub\"] = payload[\"sub\"]\n access_token_data[\"exp\"] = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)\n access_token_data[\"jti\"] = str(uuid.uuid4())\n\n return AccessToken(access_token=jwt.encode(access_token_data, SECRET_KEY, algorithm=ALGORITHM))", "def password_token_oracle():\n past_time = int(time.time()) - random.randint(1, 3600)\n return generate_password_reset_token(past_time), past_time", "def _generate_token_value():\n return secrets.token_urlsafe()", "def generate_key():\n return get_token_generator().generate_token()", "def refresh_token(self):\n # basic function to get an access token\n api_response = requests.get(\n self.api_config.get_api_url() + \"authentication/g?username=\" + self.api_config.get_api_username() + \"&password=\" + self.api_config.get_api_password())\n\n if api_response.status_code >= 200:\n self.API_TOKEN = api_response.content.decode()\n\n return self.API_TOKEN\n else:\n return None" ]
[ "0.76322883", "0.74220634", "0.7369915", "0.7336193", "0.72870505", "0.72259885", "0.7058188", "0.705621", "0.70044553", "0.6850953", "0.6800217", "0.6707107", "0.6685085", "0.66625905", "0.6657137", "0.6596008", "0.65536267", "0.654388", "0.65340185", "0.6531621", "0.64729536", "0.6462713", "0.6440464", "0.6428968", "0.6422269", "0.64191455", "0.6405395", "0.6404847", "0.6384399", "0.63843715" ]
0.828994
0
Verifies the authorization request and returns an auth code if requested
def verify_auth_request(self, *args, **kwargs): if len(args) == 1: url = args[0] qs = get_query_string(url) response_type = qs.pop('response_type', None) client_id = qs.pop('client_id', None) redirect_uri = qs.pop('redirect_uri', None) scope = qs.pop('scope', None) state = qs.pop('state', None) elif len(args) == 2: response_type = args[0] client_id = args[1] redirect_uri = kwargs.pop('redirect_uri', None) scope = kwargs.pop('scope', None) state = kwargs.pop('state', None) if not client_id: return self.invalid_request( error_description = 'client_id is required' , redirect_uri = redirect_uri , state = state ) if not response_type: return self.invalid_request( error_description = 'response_type is required' , redirect_uri = redirect_uri , state = state ) is_client_id_valid = self.verify_client_id(client_id) if not is_client_id_valid: return self.unauthorized_client( redirect_uri = redirect_uri , state = state ) if redirect_uri == None: redirect_uri = self.get_redirect_uri(client_id) is_redirect_uri_valid = self.verify_redirect_uri(client_id, redirect_uri) if not is_redirect_uri_valid: return self.invalid_request() is_scope_valid = self.verify_scope(scope) if not is_scope_valid: return self.invalid_scope( redirect_uri = redirect_uri , state = state ) is_authenticated = self.authenticate_user() if not is_authenticated: return self.access_denied( redirect_uri = redirect_uri , state = state ) if response_type == 'code': # We are doing 4.1.1 code = self.generate_authorization_code() # Save information to be used to validate later requests self.save_auth_code( client_id , code , scope , redirect_uri ) new_qs = {'code': code} if state: new_qs['state'] = state return { 'redirect_uri': clean_url(redirect_uri, new_qs, should_force_ssl=self.should_force_ssl ) } elif response_type == 'token': # We are doing 4.2.1 token = self.generate_access_token() self.save_auth_token(token, None) # don't issue a refresh token in this mode #TODO: If scope is different than requested, return it return {'access_token': token } else: return self.unsupported_response_type( redirect_uri = redirect_uri , state = state )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_auth_code(self, code):\n raise NotImplementedError(\n \"\"\"\n verify_scope must be implemented by a child class\n \"\"\"\n )", "def validate_code(request):\n user_id = api.keystone.get_user_id(request)\n print \"USER CHECK\"\n print user_id\n user = api.keystone.user_get(request, user_id)\n user_auth_code = request.GET.get('auth_code', None)\n secret = request.GET.get('secret', None)\n\n #Generate a code form our side using algorithm and use it to validate\n generated_code = api.keystone.generate_totp(secret)\n\n print secret\n print user_auth_code\n print generated_code\n print 'entering code comparison'\n \n data = {}\n extra = {}\n\n #Code comparison\n if user_auth_code == generated_code:\n data['totp_authenticated'] = True\n extra['two_factor_enabled'] = True\n\textra['secret_key'] = secret\n api.keystone.enable_2fa(request, user, **extra)\n else:\n \tprint 'falseeeeee'\n data['totp_authenticated'] = False\n return JsonResponse(data)", "def get_user_authorization(request_token):\n authorize_url = AUTHORIZE_URL\n authorize_url = authorize_url.format(request_token=request_token)\n print 'Please go here and authorize: ' + authorize_url\n return raw_input('Please input the verifier: ')", "def authenticate():\n\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n get_auth_headers())", "def check_auth():", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance)", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL, need token.\\n', 403,\n {'WWW-Authenticate': 'Basic realm=\"token Required\"'})", "def authenticate():\n return abort(401)", "def verifier(self,code):\n \n client = oauth.Client(self.consumer)\n resp, content = client.request(self.access_token_url, \"POST\")\n if resp['status'] != '200':\n print resp\n raise FBError(\"Invalid response %s.\" % resp['status'])\n access_token = dict(urlparse.parse_qsl(content))\n self._access_token = access_token", "def step_impl(context):\n fields = {\n 'response_type': 'code',\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n 'scope': context.vendor_config['versioned_auth']['scope'],\n 'state': uuid.uuid4(),\n }\n\n fields.update(dict(context.table))\n\n authorize_uri = get_authorize_uri(context)\n\n response = requests.get(authorize_uri,\n params=fields,\n allow_redirects=False,\n timeout=5)\n\n context.response = response", "def confirm_authorization_request(self):\n server = self.server\n scope = request.params.get('scope') or ''\n scopes = scope.split()\n credentials = dict(\n client_id=request.params.get('client_id'),\n redirect_uri=request.params.get('redirect_uri', None),\n response_type=request.params.get('response_type', None),\n state=request.params.get('state', None),\n )\n log.debug('Fetched credentials from request %r.', credentials)\n redirect_uri = credentials.get('redirect_uri')\n log.debug('Found redirect_uri %s.', redirect_uri)\n\n uri, http_method, body, headers = extract_params(True)\n try:\n ret = server.create_authorization_response(\n uri, http_method, body, headers, scopes, credentials\n )\n 
log.debug('Authorization successful.')\n return create_response(*ret)\n except oauth2.FatalClientError as e:\n log.debug('Fatal client error %r', e)\n return redirect(e.in_uri(self.error_uri))\n except oauth2.OAuth2Error as e:\n log.debug('OAuth2Error: %r', e)\n return redirect(e.in_uri(redirect_uri or self.error_uri))", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n fields.update(dict(context.table))\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance)", "def authenticate():\n resp = {\"status\": 401, \"message\": \"Could not verify your access level for that URL\"}\n return Response(dumps(resp), status=404, mimetype='application/json')", "async def authorization(request):\n # Decode tokens, load/check users and etc\n # ...\n # in the example we just ensure that the authorization header exists\n return request.headers.get(\"authorization\", \"\")", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)", "def authenticate():\n try:\n return redirect(authorize_uri + '?client_id=' + client_id + \\\n '&response_type=code&redirect_uri=' + redirect_uri + '&scope=user-library-read user-modify-playback-state')\n except Exception as e:\n return ('authenticate() threw ' +str(e))", "def step_impl(context, request_type):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance,\n request_type)", "def get_authorization():\n return True", "def auth_verify():\n try:\n oauth_verifier = request.args.get('oauth_verifier')\n if not oauth_verifier:\n raise Exception('expected oauth_verifier parameter')\n auth_token = session_get('auth_token')\n auth_token_secret = session_get('auth_token_secret')\n auth_redirect = session_get('auth_redirect')\n if not (auth_token and auth_token_secret):\n raise Exception('Authorization credentials not found in session')\n tk = get_twitter_keys()\n client = UserClient(tk.consumer_key, tk.consumer_secret,\n auth_token, auth_token_secret)\n token = client.get_access_token(oauth_verifier)\n session_set('access_token', token.oauth_token)\n session_set('access_token_secret', token.oauth_token_secret)\n session_pop_list(['auth_token', 'auth_token_secret', 'auth_redirect'])\n if auth_redirect:\n return redirect(auth_redirect)\n else:\n return redirect(url_for('home'))\n except Exception, e:\n traceback.print_exc()\n return redirect(auth_redirect)", "def get_auth_token(self, request: Request, type=\"Bearer\") -> str:\n if \"Authorization\" not in request.headers:\n raise AuthenticationRequiredException\n try:\n auth_type, auth_code = request.headers[\"Authorization\"].split(' ')\n assert auth_type == type\n except Exception:\n raise AuthenticationSchemeInvalidException\n return auth_code", "def authorized():\n code = bottle.request.query.code\n auth_state = bottle.request.query.state\n 
if auth_state != SESSION.auth_state:\n raise Exception('state returned to redirect URL does not match!')\n auth_context = adal.AuthenticationContext(config.AUTHORITY_URL, api_version=None)\n token_response = auth_context.acquire_token_with_authorization_code(\n code, config.REDIRECT_URI, config.RESOURCE, config.CLIENT_ID, config.CLIENT_SECRET)\n SESSION.headers.update({'Authorization': f\"Bearer {token_response['accessToken']}\",\n 'User-Agent': 'adal-sample',\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'SdkVersion': 'sample-python-adal',\n 'return-client-request-id': 'true'})\n return bottle.redirect('/graphcall')", "def request_verification(data):\n if 'email' in data:\n if user_exists(data['email']):\n return get_user_id(data['email'])\n else:\n return 401\n else:\n return 400", "def authorize(req, resp):\n api.redirect(resp, location=authorize_url())", "def _handleRequestAuthorization(self, data):\r\n print(\"\\\"Request Authorization\\\" received\")\r\n message = self.whitebeet.v2gParseRequestAuthorization(data)\r\n timeout = int(message['timeout'] / 1000) - 1\r\n # Promt for authorization status\r\n auth_str = input(\"Authorize the vehicle? Type \\\"yes\\\" or \\\"no\\\" in the next {}s: \".format(timeout))\r\n if auth_str is not None and auth_str == \"yes\":\r\n print(\"Vehicle was authorized by user!\")\r\n try:\r\n self.whitebeet.v2gSetAuthorizationStatus(True)\r\n except Warning as e:\r\n print(\"Warning: {}\".format(e))\r\n except ConnectionError as e:\r\n print(\"ConnectionError: {}\".format(e))\r\n else:\r\n print(\"Vehicle was NOT authorized by user!\")\r\n try:\r\n self.whitebeet.v2gSetAuthorizationStatus(False)\r\n except Warning as e:\r\n print(\"Warning: {}\".format(e))\r\n except ConnectionError as e:\r\n print(\"ConnectionError: {}\".format(e))", "def auth_error():\n return unauthorized('Invalid credentials')", "def check_auth_interactive_response(self, responses):\n return AUTH_FAILED", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n fields.update(dict(context.table))\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)", "def check_authorization(self, params):\n response = self.post(f\"{self.gateway_path}/check_authorization\", params)\n return response", "def authenticate():\r\n return Response(\r\n 'Could not verify your access level for that URL.\\n'\r\n 'You have to login with proper credentials', 401,\r\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def oauth2_process_code(self, request, redirect_uri):\n if 'code' in request.GET:\n # We've got a code from an authorisation, so convert it to a access_token\n\n self.oauth2_access_token(request.GET['code'], next=redirect_uri)\n\n request.session['oauth2_token'] = self.oauth2_token\n request.session['oauth2_token_expires'] = self.oauth2_token_expires\n\n return True\n # else: 'error_reason' in request.GET\n \n return False" ]
[ "0.6792753", "0.67879415", "0.6754928", "0.66999227", "0.664164", "0.6627876", "0.6587151", "0.6568782", "0.6545388", "0.6545202", "0.65152884", "0.6478774", "0.64485216", "0.6409453", "0.6403518", "0.63865304", "0.6366206", "0.63432086", "0.63319945", "0.63268024", "0.63036984", "0.6293214", "0.62745476", "0.6270816", "0.6268133", "0.6265261", "0.62625283", "0.626094", "0.62600464", "0.6259025" ]
0.69689685
0
This persists the authorization code to a datastore for checking against on the next request
def save_auth_code(self, client_id, code, scope, redirect_uri): raise NotImplementedError( """ save_auth_code must be implemented by a child class """ )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_authorization_code(self, client_id, code, request, *args, **kwargs):\n log.debug('Persist authorization code %r for client %r', code, client_id)\n request.client = request.client or self._clientgetter(client_id)\n self._grantsetter(client_id, code, request, *args, **kwargs)\n return request.client.default_redirect_uri", "def auth(self):\n if self.get_saved_token():\n return\n self.oauth2()\n self.save_token()", "def authorized(self):\n pass", "def authorize_self(self, card_info=None):\n self.state = 'authorized'\n self.save()", "def auth_code_handler(self, request, pk=None):\n try:\n # Get xero auth access information form xero connection\n stored_values = OAUTH_PERSISTENT_SERVER_STORAGE\n\n\n if len(stored_values) == 0:\n return Utils.dispatch_failure(request, 'NO_TOKEN_AUTHENTICATION')\n\n secret_keys = Utils.get_access_keys(pk)\n if AccountingConfiguration.PRIVATE == secret_keys.type:\n exists = AccountingOauth2.objects.filter(company=pk).first()\n if not exists:\n auth = AccountingOauth2(accessToken=stored_values['consumer_key'],\n accessSecretKey=stored_values['rsa_key'],\n company_id=pk)\n auth.save()\n else:\n exists.accessToken = stored_values['consumer_key']\n exists.accessSecretKey = stored_values['rsa_key']\n exists.save()\n else:\n auth_verifier_uri = settings.XERO_AUTH_VERIFIER_URI\n oauth_verifier = request.GET.get('oauth_verifier')\n credentials = Utils.get_xero_public_credentials(stored_values)\n\n if credentials.expired():\n return Utils.dispatch_failure(request, 'NO_TOKEN_AUTHENTICATION')\n\n # Verify the auth verifier for establish the connection\n\n credentials.verify(oauth_verifier)\n # Resave our verified credentials\n for key, value in credentials.state.items():\n OAUTH_PERSISTENT_SERVER_STORAGE.update({key: value})\n\n stored_values = OAUTH_PERSISTENT_SERVER_STORAGE\n exists = AccountingOauth2.objects.filter(company=pk).first()\n\n if exists:\n exists.accessToken = stored_values['oauth_token']\n exists.realmId = oauth_verifier\n exists.accessSecretKey = stored_values['oauth_token_secret']\n exists.tokenAcitvatedOn = stored_values['oauth_expires_at']\n exists.tokenExpiryON = stored_values['oauth_authorization_expires_at']\n exists.save()\n else:\n auth = AccountingOauth2(accessToken=stored_values['oauth_token'],\n refreshToken='',\n realmId=oauth_verifier,\n accessSecretKey=stored_values['oauth_token_secret'],\n tokenAcitvatedOn=stored_values['oauth_expires_at'],\n tokenExpiryON=stored_values['oauth_authorization_expires_at'],\n company_id=pk)\n auth.save()\n # auth_redirect_url = os.environ.get ('QBO_AUTH_REDIRECT_URL',\n # 'http://localhost:4200/coa-match/quickbooks')\n\n # auth_redirect_url = os.environ.get ('QBO_AUTH_REDIRECT_URL','http://ec2-52-207-28-114.compute-1.amazonaws.com/ix/coa-match/quickbooks')\n\n # return redirect(auth_redirect_url)\n\n except Exception as e:\n auth_cancel_url = settings.QBO_AUTH_CANCEL_URL\n Utils.send_company_misconfig(pk, e)\n return redirect(auth_cancel_url + '/error')\n #return Utils.dispatch_success(request, 'TOKEN_ALREADY_VALIDATED')\n\n auth_redirect_url = settings.XERO_AUTH_REDIRECT_URL\n return redirect(auth_redirect_url)\n # return Utils.dispatch_success(request, stored_values)", "def authorize():\n\n google = oauth.create_client('google') # create the google oauth client\n # Access token from google (needed to get user info)\n token = google.authorize_access_token()\n # userinfo contains stuff u specificed in the scrope\n resp = google.get('userinfo')\n user_info = resp.json()\n email = user_info['email']\n 
first_name = user_info['given_name'].capitalize()\n last_name = user_info['family_name'].capitalize()\n image_url = User.image_url.default.arg\n if user_info['picture'] != '' and user_info['picture']:\n image_url = user_info['picture']\n user = oauth.google.userinfo() # uses openid endpoint to fetch user info\n # Here you use the profile/user data that you got and query your database find/register the user\n # and set ur own data in the session not the profile from google\n\n # compare with database;\n user = User.query.filter_by(email=email).first()\n print(user)\n\n if not user:\n user = User.signup(email=email, password='password1', username='temporary',\n first_name=first_name, last_name=last_name, image_url=image_url, cover_url=User.cover_url.default.arg)\n db.session.commit()\n do_login(user)\n return redirect(f'/user/{user.id}/force-reset')\n else:\n do_login(user)\n return redirect('/')", "def authorization():\n pass", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n fields.update(dict(context.table))\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance)", "def take_auth(aid):\r\n auth_passwd = request.values.get('auth_passwd', '')\r\n with engine.with_session() as ss:\r\n cur_auth = ss.query(LxContractAuthorization).get(aid)\r\n if not cur_auth:\r\n return jsonify({'success': False, 'errorMsg': constants.ERROR_CODE[\r\n 'AUTH_NOT_EXISTS']})\r\n if not sha256_crypt.verify(auth_passwd, cur_auth.auth_passwd):\r\n return jsonify({'success': False, 'errorMsg': constants.ERROR_CODE[\r\n 'AUTH_PASSWD_ERROR']})\r\n update_dict = dict()\r\n update_dict['user'] = current_user\r\n cur_auth.update(update_dict)\r\n return jsonify({'success': True, 'data': cur_auth.contract_id})", "def set_auth_state(self, data):\n raise NotImplementedError()", "def authorize(self):\n\n\t\tprint 'Authorizing...'\n\n\t\tif self.youtube:\n\t\t\tprint 'Already authorized'\n\t\t\treturn False\n\n\t\tself.youtube = build(self.YOUTUBE_API_SERVICE_NAME, \n\t\t\t\t\t\t self.YOUTUBE_API_VERSION,\n \t\t\t\t\t\t developerKey=self.DEVELOPER_KEY)", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance)", "def step_impl(context):\n fields = {\n 'response_type': 'code',\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n 'scope': context.vendor_config['versioned_auth']['scope'],\n 'state': uuid.uuid4(),\n }\n\n fields.update(dict(context.table))\n\n authorize_uri = get_authorize_uri(context)\n\n response = requests.get(authorize_uri,\n params=fields,\n allow_redirects=False,\n timeout=5)\n\n context.response = response", "def finalize():\n registration_token = guard.read_token_from_header()\n user = guard.get_user_from_registration_token(registration_token)\n user.is_active = True\n db.session.commit()\n ret = {'access_token': guard.encode_jwt_token(user)}\n return jsonify(ret), 200", "def save_credentials(self):\n Stores.account_store.append(self.register_stores())", "def 
authorization_successful(req, resp):\n params = {\n \"client_id\": os.getenv('STRAVA_CLIENT_ID'),\n \"client_secret\": os.getenv('STRAVA_CLIENT_SECRET'),\n \"code\": req.params.get('code'),\n \"grant_type\": \"authorization_code\"\n }\n r = requests.post(\"https://www.strava.com/oauth/token\", params)\n logger.debug(r.text)\n resp.text = r.text", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n fields.update(dict(context.table))\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)", "def save(self):\n # EXERCISE:\n # - save self.access_token, self.user_id, self.save_message to access token file AccessData.ACCESS_TOKEN_FILE\n # @see http://stackoverflow.com/questions/12309269/write-json-data-to-file-in-python\n# TODO ==> INSERT CODE HERE <==\n\n logger.debug('saved access token in file %s' % (AccessData.ACCESS_TOKEN_FILE))", "async def authorize(self):\n # TODO: make several attempts for each step\n html = await self.get_auth_page()\n url, html = await self.process_auth_form(html)\n q = urllib.parse.urlparse(url)\n if q.path == '/authorize': # invalid login or password\n url, html = await self.process_auth_form(html)\n q = urllib.parse.urlparse(url)\n if q.path == '/login':\n url, html = await self.process_2auth_form(html)\n q = urllib.parse.urlparse(url)\n if q.path == '/login':\n url, html = await self.process_2auth_form(html)\n q = urllib.parse.urlparse(url)\n if q.path == '/authorize': # give rights for app\n url, html = await self.process_access_form(html)\n q = urllib.parse.urlparse(url)\n if q.path == '/blank.html':\n qs = dict(urllib.parse.parse_qsl(q.fragment))\n self.access_token = qs['access_token']", "def authorize():\n resp = git_auth.authorized_response()\n user_info = git_auth.get('user', token=(resp[\"access_token\"],)).data\n u = db_session.query(User).filter(User.email == user_info['email']).first()\n if not u:\n u = User(user_info['login'], user_info['email'])\n db_session.add(u)\n db_session.commit()\n login_user(u, remember=True)\n return redirect(url_for('index'))", "def authorize():\n token = oauth.tapkey.authorize_access_token()\n session['auth'] = token\n return redirect(url_for('owner_account_chooser'))", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)", "def save_token(self):\n db.session.add(self)\n db.session.commit()", "def step_impl(context, request_type):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance,\n request_type)", "def store_auth_token(auth_token_value):\n if Token(key_name = 'authtoken', value = auth_token_value).put():\n memcache.set('authtoken', auth_token_value)\n return True\n else:\n return False", "def callback():\n code = request.args.get('code')\n result = http.post(token_uri, data = {\n 'grant_type': 'authorization_code',\n 'code': code,\n 'redirect_uri': 
redirect_uri,\n 'client_id': client_id,\n 'client_secret': client_secret\n })\n data = result.json()\n \n access_token = data['access_token']\n refresh_token = data['refresh_token']\n \n cache.set('access_token', access_token)\n cache.set('refresh_token', refresh_token)\n\n return redirect('/')", "def auth():\n\tcode = request.query.code\n\tauth = 'https://foursquare.com/oauth2/access_token'\n\tparams = dict(\n\t\tclient_id=CLIENT_ID,\n\t\tclient_secret=CLIENT_SECRET,\n\t\tgrant_type='authorization_code',\n\t\tredirect_uri=REDIRECT_URI,\n\t\tcode=code\n\t)\n\tauth_says = fetch('%s?%s'%(auth, urlencode(params)))\n\tauth_response = json.loads(auth_says.content)\n\tif 'access_token' in auth_response:\n\t\toauth_token=auth_response['access_token']\n\t\tresponse.set_cookie('user', oauth_token, secret=CLIENT_SECRET)\n\t\tlogging.info('new oauth_token:%s'%oauth_token)\n\t\tredirect('/')\n\telse:\n\t\tlogging.error(auth_response)\n\t\tabort()", "def authorized():\n code = bottle.request.query.code\n auth_state = bottle.request.query.state\n if auth_state != SESSION.auth_state:\n raise Exception('state returned to redirect URL does not match!')\n auth_context = adal.AuthenticationContext(config.AUTHORITY_URL, api_version=None)\n token_response = auth_context.acquire_token_with_authorization_code(\n code, config.REDIRECT_URI, config.RESOURCE, config.CLIENT_ID, config.CLIENT_SECRET)\n SESSION.headers.update({'Authorization': f\"Bearer {token_response['accessToken']}\",\n 'User-Agent': 'adal-sample',\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'SdkVersion': 'sample-python-adal',\n 'return-client-request-id': 'true'})\n return bottle.redirect('/graphcall')", "def secretstore():\n pass", "def authorise(data, ind):\n global approved\n global pending_sheet\n approved.append_row(data)\n ind += 1\n pending_sheet.delete_rows(ind)\n print(colored('\\nApplication authorised.\\n', 'cyan', attrs=['bold']))" ]
[ "0.685759", "0.5918923", "0.5841991", "0.5800864", "0.577557", "0.5707651", "0.56489456", "0.55262905", "0.5508438", "0.5504208", "0.5499275", "0.5497645", "0.5494326", "0.5481592", "0.5458928", "0.54573834", "0.53886586", "0.53853977", "0.53600675", "0.5358889", "0.5333976", "0.5322007", "0.5316388", "0.5311007", "0.5310357", "0.5306451", "0.52847594", "0.52758026", "0.5220541", "0.5203574" ]
0.65713686
1
This validates that the auth_code is legitimate and attached to an active user. Should return True or False
def verify_auth_code(self, code): raise NotImplementedError( """ verify_auth_code must be implemented by a child class """ )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_code(request):\n user_id = api.keystone.get_user_id(request)\n print \"USER CHECK\"\n print user_id\n user = api.keystone.user_get(request, user_id)\n user_auth_code = request.GET.get('auth_code', None)\n secret = request.GET.get('secret', None)\n\n #Generate a code form our side using algorithm and use it to validate\n generated_code = api.keystone.generate_totp(secret)\n\n print secret\n print user_auth_code\n print generated_code\n print 'entering code comparison'\n \n data = {}\n extra = {}\n\n #Code comparison\n if user_auth_code == generated_code:\n data['totp_authenticated'] = True\n extra['two_factor_enabled'] = True\n\textra['secret_key'] = secret\n api.keystone.enable_2fa(request, user, **extra)\n else:\n \tprint 'falseeeeee'\n data['totp_authenticated'] = False\n return JsonResponse(data)", "def verifyActivationCode( self, username, activation_code ):\n try:\n con = self.getMetadataDatabaseConnection()\n user_data = con.cursor()\n con.cursor().callproc('verify_user_activation_code', [username, activation_code, user_data])\n row = user_data.fetchone()\n if row:\n return True\n else:\n return False\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def checkCode():\n code = {'code': request.json['code'], 'phone': request.json['phone']}\n # if code and phone exist authorize user\n user = models.User.query.filter_by(code=code['code'], phone=code['phone']).first()\n value = \"User and SMS code don't exist\"\n if (user):\n user.is_verified = True\n value = \"Code exists!\"\n db.session.commit()\n resp = Response(json.dumps(value), status=200, mimetype='application/json')\n return resp", "def validate_key_code(self, code):\n\n key = self.connect().query(KeyCode)\\\n .filter(KeyCode.code == code)\\\n .first()\n\n if key and (key.user and key.enabled):\n return True\n return False", "def activate_profile(field, code, request):\n try:\n activation = ActivationProfile.objects.get(**{field:code})\n except ActivationProfile.DoesNotExist:\n messages.error(request, _('Activation code expired or not valid!'))\n return False\n if timezone.now() < activation.valid_through:\n activation.user.is_active = True\n activation.user.set_unusable_password()\n activation.user.save()\n if request.user.is_anonymous():\n if field == 'token':\n user = authenticate(username=activation.user.username, token=activation.token)\n elif field == 'sms_key':\n user = authenticate(username=activation.user.username, code=activation.sms_key)\n else:\n user = None\n activation.delete()\n if user:\n login(request, user)\n messages.success(request, _(\"\"\"Profile activated successfully! 
You should change your password!\"\"\"))\n return True\n else:\n return False\n else:\n messages.success(request, _(\"\"\"You already have an account!\"\"\"))\n return False", "def activateWebAppUser( self, username, activation_code ):\n try:\n con = self.getMetadataDatabaseConnection()\n user_data = con.cursor()\n\n con.cursor().callproc('verify_user_activation_code', [username, activation_code, user_data])\n row = user_data.fetchone()\n if row:\n con.cursor().callproc('activate_user_account', [username])\n return True\n else:\n return False\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def validate_code(self, client_id, code, client, request, *args, **kwargs):\n client = client or self._clientgetter(client_id)\n log.debug('Validate code for client %r and code %r', client.client_id, code)\n grant = self._grantgetter(client_id=client.client_id, code=code)\n if not grant:\n log.debug('Grant not found.')\n return False\n if hasattr(grant, 'expires') and datetime.datetime.utcnow() > grant.expires:\n log.debug('Grant is expired.')\n return False\n\n request.state = kwargs.get('state')\n request.user = grant.user\n request.scopes = grant.scopes\n return True", "def is_valid(self):\n return self.user.is_authenticated", "def test_auth_code_positive(self, api):\n self.builder.add_user(api.get_user())\n resp = api.login_user(api.get_user().username, api.get_user().password)\n self.builder.del_user(api.get_user())\n assert resp.status_code == 200", "def validate_verification_code(self, device, code):\n #LOGGER.info(f\"Verification code-{code}\")\n device.update({'verificationCode': code, 'trustBrowser': True})\n data = json.dumps(device)\n\n try:\n self.session.post(\n f\"{self.SETUP_ENDPOINT}/validateVerificationCode\",\n params=self.params,\n data=data,\n )\n except PyiCloudAPIResponseException as error:\n LOGGER.info(f\"Verification Error code-{error.code}\")\n if error.code == -21669:\n # Wrong verification code\n return False\n #raise\n\n # Re-authenticate, which will both update the HSA data, and\n # ensure that we save the X-APPLE-WEBAUTH-HSA-TRUST cookie.\n self.authenticate()\n\n return not self.requires_2sa", "def check_auth():", "async def validate_account(self) -> bool:\n raise NotImplementedError", "def _is_valid(self):\n # TODO: Query Google to validate credentials\n return True", "def verify(self, code) -> bool:\n totp = self.__initialize_totp()\n return totp.verify(code)", "def check_user(entry_code):\n\tif len(User.objects.filter(unique_code=entry_code)) == 1:\n\t\treturn(True)\n\telse:\n\t\traise Http404('No users exist with this code.')", "def authenticate_user(data):\n \n try:\n auth_token = data[\"auth_token\"]\n user_token = Token.objects.get(username=data[\"username\"])\n if user_token.token == auth_token:\n return True\n except:\n return False\n return False", "def is_valid(self):\n return self.is_active", "def check_new_payment_authcode(self, request: HttpRequest):\n return self.check_authcode_params(\n request,\n (\n \"RETURN_CODE\",\n \"ORDER_NUMBER\",\n \"SETTLED\",\n \"CONTACT_ID\",\n \"INCIDENT_ID\",\n ),\n )", "def make_user_active(self, user_id, verify_code):\n\n try:\n user = self.get(id=user_id)\n if not user.verify_code or user.verify_code != verify_code or user.verify_code_expire < timezone.now():\n raise Exception('Verification code is invalid or expired.')\n\n # Verification code shouldn't be used again\n user.verify_code = None\n user.is_verified = True\n user.is_active = True\n user.save()\n\n except 
get_user_model().DoesNotExist:\n raise Exception('Password reset code is invalid or expired.')", "def check(self):\n\n us = ServiceLocator.resolve(ServiceLocator.USERS)\n\n user_session = self.get()\n user = self.get_user()\n\n return user is not None and us.verify_auth_token(user_session.token, config.SESSION_EXPIRES)", "def _check_auth(self):\n if self.authToken:\n return True\n else:\n msg = \"you need to login\"\n self.raise_error(msg)", "def check_auth(self):\n if self.enterprise_url is not None:\n return True\n try:\n if self.api is not None:\n # Throws AuthenticationFailed if invalid credentials but\n # does not deduct from the rate limit.\n self.api.ratelimit_remaining\n return True\n else:\n self.print_auth_error()\n except AuthenticationFailed:\n self.print_auth_error()\n return False", "def verify_otp(self, session, identifier, code):\n attempt = session.query(LoginAttempt).filter_by(identifier=identifier).first()\n conditions = [\n attempt,\n attempt.is_valid(),\n TOTPManager(attempt.user).verify(code),\n ]\n if not all(conditions):\n raise InvalidOTP\n return True", "def check_authcode_params(self, request: HttpRequest, params: Iterable[str]):\n is_valid = True\n auth_code_calculation_values = [\n request.GET[param_name]\n for param_name in params\n if param_name in request.GET\n ]\n correct_auth_code = self.calculate_auth_code(\n \"|\".join(auth_code_calculation_values)\n )\n auth_code = request.GET[\"AUTHCODE\"]\n if not hmac.compare_digest(auth_code, correct_auth_code):\n logger.warning('Incorrect auth code \"{}\".'.format(auth_code))\n is_valid = False\n return is_valid", "def validate_email(self, data):\n user = account_models.User.objects.filter(username__iexact=data, is_active=True)\n if user:\n return data\n raise serializers.ValidationError(\"Email address not verified for any user account\")", "def is_active(self):\n return self.status == ACTIVE_USER", "def test_func(self):\n return self.request.user.is_active # any active user", "def activate_user(username, code, new_pass):\r\n\r\n qry = Activation.query.\\\r\n filter(Activation.code == code).\\\r\n filter(User.username == username)\r\n\r\n res = qry.first()\r\n\r\n if UserMgr.acceptable_password(new_pass) and res is not None:\r\n user = res.user\r\n user.activated = True\r\n user.password = new_pass\r\n res.activate()\r\n\r\n LOG.debug(dict(user))\r\n\r\n return True\r\n else:\r\n return None", "def verify(self):\n ACTIVATION_PERIOD = datetime.timedelta(days=14)\n if not self.org_verified:\n self.org_verified = True\n if not self.is_active:\n if not self.activation_code:\n self.activation_code = random_url_safe_code()\n self.activate_by = datetime.datetime.utcnow() + ACTIVATION_PERIOD\n import messaging # avoid circular import\n messaging.send_activation_emails(self)\n self.save()", "def validUser(self):\n if self.state == SessionStates.LOGGED_OUT:\n return False\n\n # if self.user == None:\n # return False\n return True" ]
[ "0.6985009", "0.66595393", "0.6610769", "0.6547535", "0.6466121", "0.64294356", "0.6392488", "0.6385686", "0.62999517", "0.61672914", "0.61525494", "0.6117841", "0.6110402", "0.6084039", "0.6073808", "0.6050932", "0.60406595", "0.6035373", "0.60296535", "0.5996132", "0.5988983", "0.5976035", "0.59584045", "0.5907783", "0.58729875", "0.58711743", "0.58660686", "0.5865895", "0.5857746", "0.5844538" ]
0.707134
0
This validates that the redirect_uri provided is registered to the client in your datastore Should return True or False
def verify_redirect_uri(self, client_id, redirect_uri): raise NotImplementedError( """ verify_redirect_uri must be implemented by a child class """ )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):\n request.client = request.client or self._clientgetter(client_id)\n client = request.client\n if hasattr(client, 'validate_redirect_uri'):\n return client.validate_redirect_uri(redirect_uri)\n return redirect_uri in client.redirect_uris", "def is_redirect_uri(self, application_url, url):\n redirect_uri = self._construct_url([application_url, self.base_url_path,\n self.redirect_uri])\n path_url = url.partition('?')[0]\n return redirect_uri == path_url", "def _verify_redirect_uri(self, areq):\n try:\n _redirect_uri = unquote(areq[\"redirect_uri\"])\n\n part = urlparse(_redirect_uri)\n if part.fragment:\n raise URIError(\"Contains fragment\")\n\n (_base, _query) = splitquery(_redirect_uri)\n if _query:\n _query = parse_qs(_query)\n\n match = False\n for regbase, rquery in self.cdb[str(areq[\"client_id\"])][\"redirect_uris\"]:\n # The URI MUST exactly match one of the Redirection URI\n if _base != regbase:\n continue\n\n if not rquery and not _query:\n match = True\n break\n\n if not rquery or not _query:\n continue\n\n # every registered query component must exist in the\n # redirect_uri\n is_match_query = True\n for key, vals in _query.items():\n if key not in rquery:\n is_match_query = False\n break\n\n for val in vals:\n if val not in rquery[key]:\n is_match_query = False\n break\n\n if not is_match_query:\n break\n\n if not is_match_query:\n continue\n\n match = True\n break\n\n if not match:\n raise RedirectURIError(\"Doesn't match any registered uris\")\n # ignore query components that are not registered\n return None\n except Exception:\n logger.error(\"Faulty redirect_uri: %s\" % areq[\"redirect_uri\"])\n try:\n _cinfo = self.cdb[str(areq[\"client_id\"])]\n except KeyError:\n try:\n cid = areq[\"client_id\"]\n except KeyError:\n logger.error(\"No client id found\")\n raise UnknownClient(\"No client_id provided\")\n else:\n logger.info(\"Unknown client: %s\" % cid)\n raise UnknownClient(areq[\"client_id\"])\n else:\n logger.info(\"Registered redirect_uris: %s\" % sanitize(_cinfo))\n raise RedirectURIError(\"Faulty redirect_uri: %s\" % areq[\"redirect_uri\"])", "def confirm_redirect_uri(self, client_id, code, redirect_uri, client, *args, **kwargs):\n client = client or self._clientgetter(client_id)\n log.debug('Confirm redirect uri for client %r and code %r.', client.client_id, code)\n grant = self._grantgetter(client_id=client.client_id, code=code)\n if not grant:\n log.debug('Grant not found.')\n return False\n if hasattr(grant, 'validate_redirect_uri'):\n return grant.validate_redirect_uri(redirect_uri)\n log.debug('Compare redirect uri for grant %r and %r.', grant.redirect_uri, redirect_uri)\n\n testing = 'OAUTHLIB_INSECURE_TRANSPORT' in os.environ\n if testing and redirect_uri is None:\n # For testing\n return True\n\n return grant.redirect_uri == redirect_uri", "def verify(self):\n if self.geturl():\n return True\n return False", "def valid_url(self):\r\n if self.resolver:\r\n return True\r\n return False", "def test_missing_redirect_uris(superuser):\n form = RegisterForm(superuser, name='Client',\n description='OAuth2 Client',\n is_confidential=choice([True, False]),\n default_scopes='read write')\n\n assert form.validate() is False\n assert _('This field is required.') in form.redirect_uris.errors", "def _urlcheck(self):\n if (self['.managerhost'] and self['.settingurl'] and self['.guid']):\n return True\n else:\n return False", "def _is_valid(self):\n # TODO: Query Google to validate 
credentials\n return True", "def verify_auth_request(self, *args, **kwargs):\n if len(args) == 1:\n url = args[0]\n qs = get_query_string(url)\n response_type = qs.pop('response_type', None)\n client_id = qs.pop('client_id', None)\n redirect_uri = qs.pop('redirect_uri', None)\n scope = qs.pop('scope', None)\n state = qs.pop('state', None)\n\n elif len(args) == 2:\n response_type = args[0]\n client_id = args[1]\n\n redirect_uri = kwargs.pop('redirect_uri', None)\n scope = kwargs.pop('scope', None)\n state = kwargs.pop('state', None)\n\n if not client_id: \n return self.invalid_request(\n error_description = 'client_id is required'\n , redirect_uri = redirect_uri\n , state = state\n )\n\n if not response_type:\n return self.invalid_request(\n error_description = 'response_type is required'\n , redirect_uri = redirect_uri\n , state = state\n )\n\n is_client_id_valid = self.verify_client_id(client_id)\n\n if not is_client_id_valid:\n return self.unauthorized_client(\n redirect_uri = redirect_uri\n , state = state\n )\n\n\n if redirect_uri == None:\n redirect_uri = self.get_redirect_uri(client_id)\n\n is_redirect_uri_valid = self.verify_redirect_uri(client_id,\n redirect_uri)\n\n if not is_redirect_uri_valid:\n return self.invalid_request()\n\n is_scope_valid = self.verify_scope(scope)\n\n if not is_scope_valid:\n return self.invalid_scope(\n redirect_uri = redirect_uri\n , state = state\n )\n\n is_authenticated = self.authenticate_user()\n\n if not is_authenticated:\n return self.access_denied(\n redirect_uri = redirect_uri\n , state = state\n )\n\n if response_type == 'code':\n # We are doing 4.1.1\n code = self.generate_authorization_code()\n\n # Save information to be used to validate later requests\n self.save_auth_code(\n client_id\n , code\n , scope\n , redirect_uri\n )\n\n new_qs = {'code': code}\n\n if state:\n new_qs['state'] = state\n\n return {\n 'redirect_uri': clean_url(redirect_uri, new_qs,\n should_force_ssl=self.should_force_ssl\n )\n }\n\n elif response_type == 'token':\n # We are doing 4.2.1\n token = self.generate_access_token()\n\n self.save_auth_token(token, None)\n\n # don't issue a refresh token in this mode\n\n #TODO: If scope is different than requested, return it\n\n return {'access_token': token }\n else:\n return self.unsupported_response_type(\n redirect_uri = redirect_uri\n , state = state\n )", "def validate_url(self):\n pass", "def validate_redirect_uri(value):\n sch, netloc, path, par, query, fra = urlparse(value)\n if not (sch and netloc):\n raise InvalidRedirectURIError()\n if sch != 'https':\n if ':' in netloc:\n netloc, port = netloc.split(':', 1)\n if not (netloc in ('localhost', '127.0.0.1') and sch == 'http'):\n raise InsecureTransportError()", "def is_redirect(response: aiohttp.ClientResponse) -> bool:\n return response.status in (300, 301, 302, 303, 307)", "def test_authorize_no_redirect_uri(self):\n invalid_params = self.valid_params.copy()\n del invalid_params['redirect_uri']\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Assert that this is NOT a redirect\n self.assertEqual(400, response.status_code)\n self.assertIsNotNone(response.json)\n self.assertEqual('invalid_request', response.json['error'])\n self.assertEqual(e_msg.NO_REDIRECT_URI,\n response.json['error_description'])", "def test_valid_authorize_request(self):\n\n random_state = six.text_type(uuid.uuid4())\n\n # Simple GET 
with various parameters\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **self.valid_params)\n\n # Assert that this is a redirect response\n self.assertEqual(303, response.status_code)\n\n # Assert that the redirect request goes to launchpad.\n location = response.headers.get('Location')\n location_url = urlparse.urlparse(location)\n parameters = urlparse.parse_qs(location_url[4])\n\n # Check the URL\n conf_openid_url = CONF.oauth.openid_url\n self.assertEqual(conf_openid_url, location[0:len(conf_openid_url)])\n\n # Check OAuth Registration parameters\n self.assertIn('fullname', parameters['openid.sreg.required'][0])\n self.assertIn('email', parameters['openid.sreg.required'][0])\n\n # Check redirect URL\n redirect = parameters['openid.return_to'][0]\n redirect_url = urlparse.urlparse(redirect)\n redirect_params = urlparse.parse_qs(redirect_url[4])\n\n self.assertIn('/openid/authorize_return', redirect)\n self.assertEqual(random_state,\n redirect_params['state'][0])\n self.assertEqual(self.valid_params['redirect_uri'],\n redirect_params['sb_redirect_uri'][0])", "def is_valid(self, bundle, request=None):\n return super(BookmarkResource, self).is_valid(bundle, request)", "def verify_request(self, request, client_address):\n\t\treturn True", "def _assert_redirect_url(self, response, expected_redirect_url):\n response_dict = json.loads(response.content.decode('utf-8'))\n assert 'redirect_url' in response_dict, (\n \"Response JSON unexpectedly does not have redirect_url: {!r}\".format(\n response_dict\n )\n )\n assert response_dict['redirect_url'] == expected_redirect_url", "def test_authorize_invalid_redirect_uri(self):\n invalid_params = self.valid_params.copy()\n invalid_params['redirect_uri'] = 'not_a_valid_uri'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Assert that this is NOT a redirect\n self.assertEqual(400, response.status_code)\n self.assertIsNotNone(response.json)\n self.assertEqual('invalid_request', response.json['error'])\n self.assertEqual(e_msg.INVALID_REDIRECT_URI,\n response.json['error_description'])", "def assert_redirect_to_register_looks_correct(self, response):\r\n self.assertEqual(302, response.status_code)\r\n self.assertEqual('/' + pipeline.AUTH_ENTRY_REGISTER, response.get('Location'))", "def redirect_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"redirect_uri\")", "def redirect_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"redirect_uri\")", "def redirect_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"redirect_uri\")", "def redirect_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"redirect_uri\")", "def redirect_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"redirect_uri\")", "def redirect_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"redirect_uri\")", "def redirect_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"redirect_uri\")", "def redirect_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"redirect_uri\")", "def assert_redirect_to_provider_looks_correct(self, response):\r\n self.assertEqual(302, response.status_code)\r\n self.assertTrue(response.has_header('Location'))", "async def test_verify_redirect_uri() -> None:\n assert await indieauth.verify_redirect_uri(\n None, 
\"http://ex.com\", \"http://ex.com/callback\"\n )\n\n with patch.object(indieauth, \"fetch_redirect_uris\", return_value=[]):\n # Different domain\n assert not await indieauth.verify_redirect_uri(\n None, \"http://ex.com\", \"http://different.com/callback\"\n )\n\n # Different scheme\n assert not await indieauth.verify_redirect_uri(\n None, \"http://ex.com\", \"https://ex.com/callback\"\n )\n\n # Different subdomain\n assert not await indieauth.verify_redirect_uri(\n None, \"https://sub1.ex.com\", \"https://sub2.ex.com/callback\"\n )" ]
[ "0.80277336", "0.7236486", "0.71824515", "0.68947905", "0.6661132", "0.6546087", "0.6481456", "0.6375012", "0.629049", "0.62775546", "0.61968976", "0.6091642", "0.59686536", "0.59132814", "0.5844726", "0.5840806", "0.58395845", "0.58297205", "0.57988995", "0.57804567", "0.57276547", "0.57276547", "0.57276547", "0.57276547", "0.57276547", "0.57276547", "0.57276547", "0.57276547", "0.57146114", "0.57094234" ]
0.7555295
1
The tags associated with the Message. Could be None.
def tags(self) -> Optional[dict]: return self._tags
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tags(self) -> Optional[Any]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"tags\")", "def tags(self):\n return self.get(\"tags\")", "def tags(self) -> dict:\n\n return self._tags or None # store trivial tags as empty (for iteration), return as None", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def tags(self):\n return self._tags", "def tags(self) -> Optional[Mapping[str, Any]]:\n return pulumi.get(self, \"tags\")", "def tags(self):\n if not hasattr(self, \"_tags\"):\n self._parse_tags()\n return self._tags", "def tags(self):\n return self._item.get(\"tags\")", "def tags(self) -> Sequence[str]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[Mapping[str, Sequence[str]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> dict:\n return self._tags", "def tags(self):\n return self.__tags[:]", "def tags(self):\n return self._changeset.get('tags', None)", "def tags(self) -> Sequence[str]:\r\n return self._tags", "def get_tags(self):\n return self.tags", "def tags(self) -> Mapping[str, str]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Mapping[str, str]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Mapping[str, str]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Mapping[str, str]:\n return pulumi.get(self, \"tags\")", "def tags(self):\n return tuple([x.strip() for x in self._dict.get('tags').split(',')])", "def tags(self) -> Optional[Sequence['outputs.EventSubscriptionTag']]:\n return pulumi.get(self, \"tags\")" ]
[ "0.7801532", "0.7703974", "0.7688504", "0.7630333", "0.7615479", "0.7615479", "0.7615479", "0.7615479", "0.7615479", "0.7615479", "0.7615479", "0.7615479", "0.75930405", "0.75923294", "0.75471735", "0.74428624", "0.74302846", "0.7429466", "0.7429466", "0.7425446", "0.7413681", "0.73889923", "0.7379134", "0.73302674", "0.7278863", "0.7278863", "0.7278863", "0.7278863", "0.7270023", "0.7263226" ]
0.7742009
1
|coro| Method which retrieves stream information on the channel, provided it is active (Live). Returns
async def get_stream(self) -> dict: data = await self._http.get_streams(channels=[self.name]) try: return data[0] except IndexError: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def get_stream(self) -> dict:\n return await self.channel.get_stream()", "def stream(self):\n\t\tdata = self._client.get(\"streams\", self.name)['stream']\n\t\tif data is not None:\n\t\t\tdata.pop('channel', None)\n\t\treturn data", "def get_stream(self, channel_name):\n self.stream = json.loads(Stream().channel(channel_name).text)", "def channel_is_streaming(self, channel_name = ''): \n \n self.get_stream(channel_name)\n stream_json = self.stream['stream']\n if stream_json is None:\n return False\n else:\n print(stream_json['channel']['name'])\n print(stream_json['game'])\n print(stream_json['viewers'])\n print(stream_json['created_at'])\n return True", "def is_stream_live(username=None):\n\tchannel_data = get_info(username, use_fallback=False)\n\treturn channel_data and channel_data['live']", "def get_realtime_stream(self):\n ws = 0\n url = WS_URL % (self.sense_monitor_id, self.sense_access_token)\n try:\n ws = create_connection(url, timeout=self.wss_timeout, sslopt={'ciphers': 'DEFAULT@SECLEVEL=1'})\n while True: # hello, features, [updates,] data\n result = json.loads(ws.recv())\n if result.get('type') == 'realtime_update':\n data = result['payload']\n self.set_realtime(data)\n yield data\n except WebSocketTimeoutException:\n raise SenseAPITimeoutException(\"API websocket timed out\")\n finally:\n if ws: ws.close()", "def get_livechat_channel_info(self):\n self.ensure_one()\n if self.channel_id:\n return self.channel_id.sudo().get_livechat_info()\n return {}", "def get(self, stream):\n\n return self._streams[stream]", "def get_channel_stream(self, stream_args, origin):\n\n return self.origins.origins_dict[origin].get_channel_stream(self.get_channel_dict(\"number\", stream_args[\"channel\"]), stream_args)", "def get(self, public_id):\n channel = get_channel_state(public_id)\n if not channel:\n api.abort(404)\n else:\n return channel", "async def stream_source(self):\n return self._stream_source", "def _get_stream(\n session: \"Session\", url_tail: str, params: Optional[Dict[str, Any]] = None\n) -> Any:\n response = _get(session, url_tail, params, stream=True)\n response.raw.decode_content = True\n return response.raw", "def update(self):\n try:\n response = self.client.describe_stream(StreamName=self.stream)\n except Exception as exc:\n logger.exception(f'Failure trying to get stream: \"{self.stream}\".', exc)\n else:\n if response['ResponseMetadata']['HTTPStatusCode'] != 200:\n logger.error(f'Failure to describe stream \"{self.stream}\": {response}')\n else:\n self.description = response['StreamDescription']", "def get_stream(self):\n self.lock.acquire()\n stream=self.stream\n self.lock.release()\n return stream", "def get(self, *args, **kwargs):\n self._stat.http_stream_open += 1\n # Create subscription for the stream\n url = self.request.uri\n self._logger.info(\"HTTP Stream connection %s %s\", url, self)\n\n auth_header = self.request.headers.get(\"Authorization\")\n if auth_header is None:\n self.finish()\n raise ValueError(\"no authorization header present\")\n\n async_future = asyncio.async(\n self.netconf_subscribe(\n self.request.uri,\n auth_header), \n loop=self._asyncio_loop)", "def is_active(self):\n\t\tself.stream.is_active()", "def get_streaming_information():\n\n #getting the guidebox_id variable from show_page.html\n guidebox_id = request.args.get(\"guidebox_id\")\n\n #gathering information about where the show's available online\n all_streaming_sources = guidebox_streaming_sources_info(guidebox_id)\n\n return jsonify(all_streaming_sources)", "def 
test_finds_live_stream(self):\n username = 'darth-vader'\n user = create_profile(username)\n\n now = timezone.now()\n streams = [\n {\n 'author': user,\n 'airs_on': now.replace(hour=(now.hour - 1)),\n 'ends_on': now.replace(hour=(now.hour + 1)),\n 'title': 'Live Stream',\n 'added_on': now\n },\n ]\n create_streams(streams)\n\n url = reverse('main_app:user', args=(username,))\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.context['live_stream'])\n self.assertEqual(response.context['live_stream'].title, 'Live Stream')", "def is_stream(self):\r\n return self.stream", "def is_active(self):\n return self.stream.is_active()", "def livestream(self, irc, msg, args, things):\n channels=things\n \n headers = utils.web.defaultHeaders\n islive=[]\n out=[]\n usernames=['tafilms','chickenstewgaming']\n if things:\n channels=usernames+\" \"+things\n else:\n channels=usernames\n\n opts = {}\n opts['channel']=','.join(channels)\n \n for c in channels:\n searchurl = 'http://x%sx.api.channel.livestream.com/2.0/livestatus.json' % c.replace('_','-')\n fd = utils.web.getUrlFd(searchurl, headers)\n\n json = simplejson.load(fd)\n fd.close()\n\n if not json:\n # Most likely no streams are live\n pass\n else:\n if json['channel']:\n if json['channel']['isLive']:\n channelurl='http://www.livestream.com/%s' % c\n out.append('%s' % channelurl)\n if out:\n irc.reply(' || '.join(out))\n else:\n irc.reply('No current live streams.')", "def getChannelResponse(self):\n \n \n return self.channel_response", "async def get_live_streamers(self) -> 'Response':\n headers = {\n 'Content-Type': 'application/json',\n }\n response = await self._client.request(method=RequestMethods.GET,\n url=USERS_LIVE_STREAMERS_URL,\n headers=headers)\n return response", "def getChannel(self):\r\n return self.channel", "def test_get_stream(self):\n pass", "def getSelected(self):\n\n return self._streams[self._selectedStream]", "def get_stream(self):\n result = self.stream\n self.stream = \"\"\n return result", "def is_active(self) -> bool:\n return self._stream.active", "def presence(self, params=None, timeout=None):\n params = params or {}\n path = '/channels/%s/presence' % self.__name\n return self.__ably._get(path, params=params, timeout=timeout).json()", "def info(self):\n unparsed = [x for x in self.run_command('info') if x != '|']\n try:\n streams = [x.split(' ')[2] for x in [x for x in unparsed if x[0] == '+'][:-1]]\n except:\n raise ParseError(\"Could not get streams.\")\n out_list = []\n start = 1\n for stream in streams:\n cur_stream = {'Stream': stream}\n first_char = '|'\n while first_char == '|':\n cur_stream[unparsed[start].split(': ')[0][2:]] = ''.join(unparsed[start].split(': ')[1:])\n start += 1\n first_char = unparsed[start][0]\n start += 1\n out_list.append(cur_stream)\n return out_list" ]
[ "0.8051792", "0.7381777", "0.665298", "0.6583245", "0.6575644", "0.63711935", "0.63618785", "0.626605", "0.62416095", "0.6187988", "0.6113333", "0.60948426", "0.6018285", "0.5974652", "0.59557825", "0.5927819", "0.5906061", "0.58414173", "0.5837924", "0.5830262", "0.5758679", "0.57511026", "0.57485527", "0.5735288", "0.5660973", "0.5634362", "0.5576277", "0.55475223", "0.5532222", "0.55123466" ]
0.77008027
1
A boolean indicating whether the User is Turbo. Could be None if no Tags were received.
def is_turbo(self) -> bool: return self.turbo
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_television(self) -> bool:\n if self.client_name() in ('Kylo', 'Espial TV Browser'):\n return True\n return TV_FRAGMENT.search(self.user_agent) is not None", "def is_bot(self) -> undefined.UndefinedOr[bool]:", "def has_tags(self):\n return bool(self.tags)", "def is_tentative(self):\n return self.state == TrackState.Tentative", "def is_tentative(self):\n return self.state == TrackState.Tentative", "def assumed_state(self):\n if self.tahoma_device.type.startswith(\"rts\"):\n return True\n\n return False", "def is_team(self):\n return self._tag == 'team'", "def tachycardic(tachycardia):\n if tachycardia is True:\n is_tachycardic = \"User is tachycardic\"\n else:\n is_tachycardic = \"User is NOT tachycardic\"\n return is_tachycardic", "def should_tag_amis(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"should_tag_amis\")", "def is_bot(self) -> bool:\n if self._bot is not None:\n return hasattr(self, 'ubot')\n return bool(Config.BOT_TOKEN)", "def should_tag_enis(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"should_tag_enis\")", "def is_valid(self):\n if self.user_tag:\n return self.interface.is_tag_available(self.user_tag)\n return True", "def has_ta(self, user, allow_superusers=True):\n return (user.is_superuser and allow_superusers) or len(self.tas.filter(id=user.id)) > 0", "def is_bot(self):\n return self._is_bot", "def is_mentor(self):\n return self.user_profile_status == self.MENTOR", "def check_tags(self):\n if(self.tags is None or not self.tags.get('subscriber', False)):\n self.filters |= Filters.NonSubs\n\n if(self.tags is None or not self.tags.get('user-type', 0) > 0):\n self.filters |= Filters.NonMods", "def otter(cls):\n return cls.autograder_format == \"otter\"", "def is_bot(self) -> bool:", "def tethering_disabled(self):\n return self._tethering_disabled", "def testBoolValue(self):\n objectID = uuid4()\n user = createUser(u'username', u'password', u'User',\n u'[email protected]')\n namespace = createNamespace(user, u'name')\n tag = createTag(user, namespace, u'tag')\n self.store.add(TagValue(user.id, tag.id, objectID, True))", "def isGasBoiler(self):\n if self.getTER1() == 0 and self.getTER2() == 0:\n return 1 #gas boiler\n else:\n return 0", "def is_active_user(self):\n\n return self.is_active", "def is_strobe(self):\n if self._driver is None and not self._strobers:\n raise ValueError(\n 'internal %s is not driven by anything' % self._name)\n return bool(self._strobers)", "def is_actor():\n return False", "def is_variant(self):\n return bool(self.gt_type)", "def test_user_is_study_tagger_true(self):\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertTrue(context['user_is_study_tagger'])", "def has_user(self):\n\t\treturn len( self.a_token ) > 0 and len( self.a_secret ) > 0", "def enable_tpu(self) -> bool:\n return pulumi.get(self, \"enable_tpu\")", "def get_treasury_status(self) -> bool:\n return self._open_treasury.get()", "def is_active(self):\n return self.status == ACTIVE_USER" ]
[ "0.5975257", "0.5843303", "0.5841019", "0.5770941", "0.5770941", "0.5764691", "0.57447207", "0.57420564", "0.5726559", "0.5702824", "0.56921756", "0.56321627", "0.55923325", "0.5584644", "0.55655676", "0.55470127", "0.5535996", "0.5490517", "0.54521006", "0.53535885", "0.5321126", "0.5306595", "0.52999336", "0.5299284", "0.52762", "0.52508676", "0.5242411", "0.5228808", "0.5221881", "0.5197386" ]
0.6886303
0
A boolean indicating whether the User is a subscriber of the current channel. Could be None if no Tags were received.
def is_subscriber(self) -> bool: return self.subscriber
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_subscriber(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.subscribers:\n return True\n return False", "def is_subscriber(self):\n try:\n return self.get_subscription().get('@type') != 'free'\n except Exception:\n # If can't retrieve, assume not paired and not a subscriber yet\n return False", "def is_subscribed(self) -> bool:\n return bool(self._subscriptions)", "def is_subscribed(self, inst, channel):\r\n if channel not in self._channels:\r\n return False\r\n return inst in self._channels[channel].subscribers", "def get_is_subscribed(self, obj):\n user = self.context['request'].user\n if not user.is_authenticated:\n return None\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n return profile in obj.subscribed_users.all()", "def user_in_channel(self, server_id, user):\n srv = self.get_server_dict(server_id)\n return user.voice.voice_channel and srv['voice'] and user.voice.voice_channel == srv['voice'].channel", "def isSubscribed(self, path):\n return self._getMailbox(path).metadata.get('subscribed', False)", "def has_subscribers(cls, topic):\n\t\tif (cls.all().filter('topic_hash =', utils.sha1_hash(topic))\n\t\t\t\t.filter('subscription_state =', cls.STATE_VERIFIED).get() is not None):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def isFullySubscribed(self) -> bool:\n\t\treturn self.subscribedCount == len(self.subscribedTopics)", "def is_subscribed(user_id, profile_user_id):\n\n subscription = Subscription.query.filter(\n Subscription.user_id == user_id,\n Subscription.subscribe_to_id == profile_user_id\n ).first()\n print(\"IS SUBSCRIBED\")\n print(subscription)\n print(subscription is not None)\n return subscription is not None", "def _subscribed(self, account_id):\n sql = \"\"\"SELECT 1 FROM hive_subscriptions\n WHERE community_id = :community_id\n AND account_id = :account_id\"\"\"\n return bool(DB.query_one(\n sql, community_id=self.community_id, account_id=account_id))", "def is_session_in_topic(cls) -> bool:\n return True", "def _subscribe(self):\n if self.subscribed:\n return False\n return {}", "def is_voicemail(self):\n return self._is_voicemail", "def is_channel(self):\n return True", "def user_present(ctx: Context, channel: TextChannel) -> bool:\n for member in channel.members:\n if member.id == ctx.author.id:\n return True\n\n return False", "def subscribe_user(self, user):\n self.ensure_one()\n if self.has_user(user):\n # already subscribed\n return False\n return self.emulate_request(user)", "def subscription_required(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"subscription_required\")", "def get_subscription(self, article: BeautifulSoup):\n if self.parsing_template.subscription and article.select_one(self.parsing_template.subscription):\n return True\n return False", "def subscription_required(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"subscription_required\")", "def is_user_event(self):\n return self._is_user_event", "def is_user_channel_member(channel_id, u_id):\n for selected_id in database.get_channel_data(channel_id)[\"member_ids\"]:\n if selected_id == u_id:\n return True\n return False", "def _is_active_subscription(self, topic: str) -> bool:\n return topic in self._simple_subscriptions or any(\n other.topic == topic for other in self._wildcard_subscriptions\n )", "def subscribers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BudgetActionSubscriberArgs']]]]:\n return pulumi.get(self, \"subscribers\")", "def 
is_valid(self):\n if self.user_tag:\n return self.interface.is_tag_available(self.user_tag)\n return True", "def joined(self):\n return str(self) in holder.bot.conn.channels.keys()", "def in_voice(self, server_id):\n srv = self.get_server_dict(server_id)\n return srv['voice'] and srv['voice'].channel", "def is_in_conference(self) -> bool:", "def is_participant(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.event.participants:\n return True\n return False", "def is_active(self, channel):\n return bool(int(self.bus.ask('sel:%s?' % channel)))" ]
[ "0.7942302", "0.7375643", "0.7002319", "0.689127", "0.6655376", "0.66528237", "0.6612391", "0.62456584", "0.6236883", "0.6226055", "0.6186541", "0.6153759", "0.60777044", "0.6073368", "0.60588056", "0.6031138", "0.59170586", "0.58796746", "0.57811135", "0.5758006", "0.57514685", "0.5749134", "0.57415634", "0.57249296", "0.5707193", "0.56739944", "0.5670411", "0.5661678", "0.5632463", "0.56097895" ]
0.77070755
1
The badges associated with the User. Could be an empty Dict if no Tags were received.
def badges(self) -> dict: return self._badges
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getRobloxBadges(userId):\n url = f\"https://accountinformation.roblox.com/v1/users/{userId}/roblox-badges\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j", "def get(self) -> Iterable[Union[Mapping, int, None]]:\n badges = self.client.get_badges()\n return marshal({'badges': badges}, badges_fields), HTTPStatus.OK", "def getBadgesFromUser(userId, limit=None):\n url = f\"https://badges.roblox.com/v1/users/{userId}/badges?limit={limit}&sortOrder=Desc\"\n acceptableLimits = (10, 25, 50, 100)\n if limit in acceptableLimits:\n r = requests.get(url)\n j = json.loads(r.text)\n data = j['data']\n return data\n else:\n if limit == None:\n warnings.warn('You did not specify a limit. The default limit is 100, and other valid limits are 10, 25, and 50.')\n limit = 100\n r = requests.get(url)\n j = json.loads(r.text)\n data = j['data']\n return data\n else:\n e = Exception(\"You have entered an invalid limit, please enter a limit of 10, 25, 50, or 100. If you don't enter a limit at all however, the default is 100.\")\n return", "def _get_badges(user_ids: Set[UserID], brand_id: BrandID\n ) -> Dict[UserID, Set[Badge]]:\n badges_by_user_id = badge_service.get_badges_for_users(user_ids,\n featured_only=True)\n\n def generate_items():\n for user_id, badges in badges_by_user_id.items():\n selected_badges = {badge for badge in badges\n if badge.brand_id in {None, brand_id}}\n yield user_id, selected_badges\n\n return dict(generate_items())", "def get_user_interests(self):\r\n database = main.connect_to_cloudsql()\r\n cursor = database.cursor()\r\n cursor.execute(\"SELECT tag FROM \" + ENV_DB +\r\n \".UserTags WHERE username='\" + self.user.username + \"'\")\r\n data = cursor.fetchall()\r\n database.close()\r\n\r\n self.most_interested = sorted([i[0] for i in data])\r\n return self.most_interested", "def getBadgeInfo(badgeId):\n url = f\"https://badges.roblox.com/v1/badges/{badgeId}\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j", "def get_badges(self) -> List:\n LOGGER.info('Get all badges')\n\n with self.client.create_session() as session:\n badges = session.query(RDSBadge).all()\n\n results = []\n for badge in badges:\n results.append(Badge(badge_name=badge.rk,\n category=badge.category))\n\n return results", "def tags(self):\r\n url = self.base_url + 'tags/'\r\n return json.loads(self.bb.load_url(url))", "def badges(self):\n for test_type, status_getter in [('image', image_status_msg), ('hash', hash_status_msg)]:\n status = getattr(self, f'{test_type}_status')\n if (\n (status == 'missing') or\n (self.status == 'failed' and status == 'match') or\n (self.status == 'passed' and status == 'diff')\n ): # Only show if different to overall status\n yield {'status': status, 'svg': test_type, 'tooltip': status_getter(status)}", "def tags(self) -> Optional[dict]:\n return self._tags", "def tags(self):\n return self.get(\"tags\")", "def get_sample_attrs(self):\n\n return {\n 'entity_type': 'BadgeClass',\n 'entity_id': 'cTjxL52HQBiSgIp5JuVq5x',\n 'open_badge_id': 'https://api.badgr.io/public/'\n 'badges/cTjxL52HQBiSgIp5JuVq5x',\n 'created_at': '2019-09-04T19:03:24Z',\n 'created_by': 'Lj__badge_creator__VIB',\n 'description': 'sample badge description',\n 'issuer': '5D__sample_issuer__4Kg',\n 'issuer_open_badge_id': 'https://api.badgr.io/'\n 'public/issuers/5D__sample_issuer__4Kg',\n 'name': 'Sample badge for Unit Tests',\n 'image': 'https://media.badgr.io/uploads/badges/'\n 'issuer_badgeclass_'\n '488caae0-6fb7-42b5-b94e-d4ea0ac7d22d.png',\n 'alignments': [],\n 'expires': {'amount': 
None, 'duration': None},\n 'criteria_narrative': 'Sample criteria narrative text',\n 'criteria_url': 'http://example.com/',\n 'tags': ['python', 'unit-test'],\n 'extensions': {}\n }", "def test_badge_should_have_tags(self):\n\n badge = self.get_sample_badge()\n # It's a string, even though it is used as a URL\n self.assertIsInstance(badge.tags, list)", "def datadog_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceIntegrationEndpointDatadogUserConfigDatadogTagArgs']]]]:\n return pulumi.get(self, \"datadog_tags\")", "def tags(self) -> Optional[Any]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> dict:\n return self._tags", "def getBadgeAwardedTime(userId, badgeId):\n url = f\"https://badges.roblox.com/v1/users/{userId}/badges/awarded-dates?badgeIds={badgeId}\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j['data']", "def tags(self) -> dict:\n\n return self._tags or None # store trivial tags as empty (for iteration), return as None", "def get_sample_badge(self):\n\n badgr = self.get_badgr_setup()\n with vcr.use_cassette('tests/vcr_cassettes/badge_retrieval.yaml'):\n return badgr.badges[0]", "def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MembershipTagArgs']]]]:\n return pulumi.get(self, \"tags\")", "def userlog_tags(self):\n url = (yield self.get_sitemap())['userlogs'] + '/tags'\n response = yield self._http_client.fetch(url)\n raise tornado.gen.Return(json.loads(response.body))", "def datadog_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceIntegrationDatadogUserConfigDatadogTagArgs']]]]:\n return pulumi.get(self, \"datadog_tags\")", "def get_sample_award_badge_data(self):\n return {\n \"recipient\": {\n \"identity\": \"[email protected]\"\n },\n \"notify\": True,\n \"evidence\": [{\n \"url\": \"http://example.com/\",\n \"narrative\": \"Joe completed all...\"\n }]\n }", "def tags(self):\n return self._item.get(\"tags\")", "def tags(self):\r\n url = '{0}/tags/'.format(self.get_url())\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json", "def tags(self):\n\t\treturn sorted(self.__tags, key=lambda tag: tag.age)", "def tags(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"tags\")", "def getUserCounts(tag, lastNesting=False):\n # Sprawdz czy bez lastNesting (jeszcze bool w wywol fun. w collectDatebase) jest szybciej.\n uc, tagDict = {}, {}\n # Petla po wszystkich wejsciach\n for p1 in pydelicious.get_tagposts(tag):\n user = p1['user']\n if user:\n uc.setdefault(user, 0)\n uc[user] += 1\n # tagDict = {tagDict.setdefault(p2['tags'].replace (' ', '_'), 1) for p2 in pydelicious.get_userposts(p1['user']) if p2['tags']}\n if lastNesting:\n break\n for p2 in pydelicious.get_userposts(p1['user']):\n if p2['tags']:\n tagDict.setdefault(p2['tags'].replace(' ', '_'), 1)\n return (user, uc), tagDict", "def tags(self) -> Optional[Mapping[str, Any]]:\n return pulumi.get(self, \"tags\")" ]
[ "0.6866523", "0.65146655", "0.63965136", "0.62054354", "0.58355975", "0.58312416", "0.5766076", "0.5588374", "0.5582012", "0.54887116", "0.5471924", "0.54631025", "0.541698", "0.5362237", "0.5343415", "0.5336226", "0.5312547", "0.5305591", "0.5285464", "0.52728367", "0.5263448", "0.52494925", "0.52479476", "0.5230391", "0.5221857", "0.520969", "0.51955664", "0.51955664", "0.5189687", "0.5184083" ]
0.7892412
0
|coro| Method which retrieves stream information on the channel stored in Context, provided it is active (Live Returns dict Dict containing active streamer data. Could be None if the stream is not live. Raises HTTPException Bad request while fetching streams.
async def get_stream(self) -> dict: return await self.channel.get_stream()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def get_stream(self) -> dict:\n\n data = await self._http.get_streams(channels=[self.name])\n\n try:\n return data[0]\n except IndexError:\n pass", "def stream(self):\n\t\tdata = self._client.get(\"streams\", self.name)['stream']\n\t\tif data is not None:\n\t\t\tdata.pop('channel', None)\n\t\treturn data", "def _get_stream(\n session: \"Session\", url_tail: str, params: Optional[Dict[str, Any]] = None\n) -> Any:\n response = _get(session, url_tail, params, stream=True)\n response.raw.decode_content = True\n return response.raw", "async def get_live_streamers(self) -> 'Response':\n headers = {\n 'Content-Type': 'application/json',\n }\n response = await self._client.request(method=RequestMethods.GET,\n url=USERS_LIVE_STREAMERS_URL,\n headers=headers)\n return response", "def is_stream_live(username=None):\n\tchannel_data = get_info(username, use_fallback=False)\n\treturn channel_data and channel_data['live']", "def get_realtime_stream(self):\n ws = 0\n url = WS_URL % (self.sense_monitor_id, self.sense_access_token)\n try:\n ws = create_connection(url, timeout=self.wss_timeout, sslopt={'ciphers': 'DEFAULT@SECLEVEL=1'})\n while True: # hello, features, [updates,] data\n result = json.loads(ws.recv())\n if result.get('type') == 'realtime_update':\n data = result['payload']\n self.set_realtime(data)\n yield data\n except WebSocketTimeoutException:\n raise SenseAPITimeoutException(\"API websocket timed out\")\n finally:\n if ws: ws.close()", "def test_finds_live_stream(self):\n username = 'darth-vader'\n user = create_profile(username)\n\n now = timezone.now()\n streams = [\n {\n 'author': user,\n 'airs_on': now.replace(hour=(now.hour - 1)),\n 'ends_on': now.replace(hour=(now.hour + 1)),\n 'title': 'Live Stream',\n 'added_on': now\n },\n ]\n create_streams(streams)\n\n url = reverse('main_app:user', args=(username,))\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.context['live_stream'])\n self.assertEqual(response.context['live_stream'].title, 'Live Stream')", "def get_stream(self, channel_name):\n self.stream = json.loads(Stream().channel(channel_name).text)", "def get_streaming_information():\n\n #getting the guidebox_id variable from show_page.html\n guidebox_id = request.args.get(\"guidebox_id\")\n\n #gathering information about where the show's available online\n all_streaming_sources = guidebox_streaming_sources_info(guidebox_id)\n\n return jsonify(all_streaming_sources)", "def get_stream_stats(self, stream, isCollectionStream=False, local=False):\n if isCollectionStream is False:\n if local is True:\n stream = \"c8locals.\" + stream\n else:\n stream = \"c8globals.\" + stream\n if local is True:\n endpoint = '{}/{}/stats?global=False'.format(ENDPOINT, stream)\n elif local is False:\n endpoint = '{}/{}/stats?global=True'.format(ENDPOINT, stream)\n\n request = Request(method='get', endpoint=endpoint)\n\n def response_handler(resp):\n code = resp.status_code\n if resp.is_success:\n return json.loads(resp.body['result'])\n elif code == 403:\n raise ex.StreamPermissionError(resp, request)\n raise ex.StreamConnectionError(resp, request)\n\n return self._execute(request, response_handler)", "def get(self, stream):\n\n return self._streams[stream]", "def simulate_get_stream(self, path='/', **kwargs):\n\n kwargs['_stream_result'] = True\n\n return _AsyncContextManager(self.simulate_request('GET', path, **kwargs))", "def update(self):\n try:\n response = self.client.describe_stream(StreamName=self.stream)\n except Exception as 
exc:\n logger.exception(f'Failure trying to get stream: \"{self.stream}\".', exc)\n else:\n if response['ResponseMetadata']['HTTPStatusCode'] != 200:\n logger.error(f'Failure to describe stream \"{self.stream}\": {response}')\n else:\n self.description = response['StreamDescription']", "def channel_is_streaming(self, channel_name = ''): \n \n self.get_stream(channel_name)\n stream_json = self.stream['stream']\n if stream_json is None:\n return False\n else:\n print(stream_json['channel']['name'])\n print(stream_json['game'])\n print(stream_json['viewers'])\n print(stream_json['created_at'])\n return True", "def get(self, *args, **kwargs):\n self._stat.http_stream_open += 1\n # Create subscription for the stream\n url = self.request.uri\n self._logger.info(\"HTTP Stream connection %s %s\", url, self)\n\n auth_header = self.request.headers.get(\"Authorization\")\n if auth_header is None:\n self.finish()\n raise ValueError(\"no authorization header present\")\n\n async_future = asyncio.async(\n self.netconf_subscribe(\n self.request.uri,\n auth_header), \n loop=self._asyncio_loop)", "def get_livechat_channel_info(self):\n self.ensure_one()\n if self.channel_id:\n return self.channel_id.sudo().get_livechat_info()\n return {}", "def get_api_stream(url, params=None, headers=None):\n\n logging.debug(\"-> get_api_stream()\")\n logging.debug(\"Request url: %s\" % url)\n\n result = requests.get(url, params=params, headers=headers)\n\n logging.debug(\"Response content: %s\" % result.content)\n logging.debug(\"<- get_api_stream()\")\n\n return result", "def _get_live_time(self):\n channel = SOCKET_ARGS['channel']\n url = 'https://api.twitch.tv/kraken/streams/{}'.format(channel.lower())\n for attempt in range(5):\n try:\n r = requests.get(url)\n r.raise_for_status()\n start_time_str = r.json()['stream']['created_at']\n start_time_dt = datetime.datetime.strptime(start_time_str, '%Y-%m-%dT%H:%M:%SZ')\n now_dt = datetime.datetime.utcnow()\n time_delta = now_dt - start_time_dt\n time_dict = {'hour': None,\n 'minute': None,\n 'second': None,\n }\n\n time_dict['hour'], remainder = divmod(time_delta.seconds, 3600)\n time_dict['minute'], time_dict['second'] = divmod(remainder, 60)\n for time_var in time_dict:\n if time_dict[time_var] == 1:\n time_dict[time_var] = \"{} {}\".format(time_dict[time_var], time_var)\n else:\n time_dict[time_var] = \"{} {}s\".format(time_dict[time_var], time_var)\n time_dict['stream_start'] = start_time_dt\n time_dict['now'] = now_dt\n except requests.exceptions.HTTPError:\n continue\n except TypeError:\n self._add_to_chat_queue('Sorry, the channel doesn\\'t seem to be live at the moment.')\n break\n except ValueError:\n continue\n else:\n return time_dict\n else:\n self._add_to_chat_queue(\n \"Sorry, there was a problem talking to the twitch api. 
Maybe wait a bit and retry your command?\")", "async def stream_source(self):\n return self._stream_source", "def get_channel_stream(self, stream_args, origin):\n\n return self.origins.origins_dict[origin].get_channel_stream(self.get_channel_dict(\"number\", stream_args[\"channel\"]), stream_args)", "def get(self, id_stream):\n\n session = current_app.session\n\n stream = session.query(StreamDao).filter(StreamDao.id == id_stream).first()\n\n if stream is None:\n return None, 204\n\n return stream, 200", "def info_by_stream_id(stream_id):\n binding = {'stream_id': stream_id}\n url = 'https://sitestream.twitter.com/2b/site/c/01_225167_334389048B872A533002B34D73F8C29FD09EFC50/info.json'\n url = url.format(**binding)\n return _TwitterRequest('GET',\n url,\n 'streaming:c',\n 'get-c-stream-id-info',\n binding)", "def get_community_live_statuses_get(self, modeHash, page, partnershipType, sort, streamLocale):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/CommunityContent/Live/All/{partnershipType}/{sort}/{page}/\"))", "def realtimestreaming_streams(self, **kwargs):\n url_path = 'realtimestreaming/streams'\n self.logger.debug(\"Get list of stream names\")\n return self._common_get(url_path, parameters=kwargs)", "def stream_api(self):\n (conn, c) = self.__db_init('stream')\n api = self.__api_init()\n\n class MyStreamListener(tweepy.StreamListener):\n \n def __init__(self, api=None):\n self.api = api\n \n def on_status(self, status):\n print(status.id, status.created_at)\n c.execute('''INSERT OR IGNORE INTO tweets (id, date) \n VALUES (?, ?)''' , (status.id, status.created_at))\n conn.commit()\n \n def on_error(self, status_code):\n if status_code == 401:\n print('Bad Authentication data.' 
+ \n ' Update api.ini with your proper credentials:')\n print(os.path.abspath(\n _path_finder('userconfig','api.ini')))\n return False #Disconnect the stream.\n elif status_code == 420:\n print('Error 420')\n return False #Disconnect the stream.\n else:\n print('Got an error with status code:', str(status_code))\n time.sleep(321)\n return True #Continue listening.\n\n print('Press Ctrl+C to exit stream')\n myStream = tweepy.Stream(auth = api.auth, \n listener = MyStreamListener()) #Create Stream\n myStream.filter(track=[self.keyword], async=False) #Start Stream", "def livestream(self, irc, msg, args, things):\n channels=things\n \n headers = utils.web.defaultHeaders\n islive=[]\n out=[]\n usernames=['tafilms','chickenstewgaming']\n if things:\n channels=usernames+\" \"+things\n else:\n channels=usernames\n\n opts = {}\n opts['channel']=','.join(channels)\n \n for c in channels:\n searchurl = 'http://x%sx.api.channel.livestream.com/2.0/livestatus.json' % c.replace('_','-')\n fd = utils.web.getUrlFd(searchurl, headers)\n\n json = simplejson.load(fd)\n fd.close()\n\n if not json:\n # Most likely no streams are live\n pass\n else:\n if json['channel']:\n if json['channel']['isLive']:\n channelurl='http://www.livestream.com/%s' % c\n out.append('%s' % channelurl)\n if out:\n irc.reply(' || '.join(out))\n else:\n irc.reply('No current live streams.')", "def get_stream(self):\n self.lock.acquire()\n stream=self.stream\n self.lock.release()\n return stream", "def get(self, public_id):\n channel = get_channel_state(public_id)\n if not channel:\n api.abort(404)\n else:\n return channel", "def is_stream(self):\r\n return self.stream", "def get_video_stream(yt, resolution):\n global adaptive\n\n resolution_itag = {'360p':134, '480p':135, '720p':136}\n progressive_streams = yt.streams.filter(progressive=True)\n video_stream = progressive_streams.get_by_resolution(resolution)\n\n if video_stream is not None:\n return video_stream\n else:\n adaptive_streams = yt.streams.filter(adaptive=True, type='video')\n video_itag = resolution_itag[resolution]\n video_stream = adaptive_streams.get_by_itag(video_itag)\n adaptive = True\n return video_stream" ]
[ "0.75702065", "0.67921305", "0.6257685", "0.62386304", "0.6161859", "0.61304474", "0.609498", "0.6094159", "0.60804814", "0.5905744", "0.5871197", "0.5746808", "0.5739209", "0.57316095", "0.5689709", "0.5653746", "0.5613071", "0.5573039", "0.55681455", "0.5562832", "0.5516599", "0.5494293", "0.54759806", "0.5456965", "0.54030776", "0.5401684", "0.53968996", "0.53871006", "0.5384697", "0.53647566" ]
0.7385613
1
Sets up the standard island map.
def setup_maps(self): super().setup_maps() sprite_classes = { "Obstacles": Wall, "Background": QuestSprite, } self.add_map(TiledMap(resolve_resource_path("images/island/island.tmx"), sprite_classes))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_maps(self):\n super().setup_maps()\n sprite_classes = {\n \"walls\": Wall,\n \"play\": Background,\n \"exit\": Background,\n }\n island_map = TiledMap((\"images/qwerty_game_1.tmx\"), sprite_classes)\n self.add_map(island_map)", "def __init__(self, island_map):\n self.map = island_map\n self.cells = None\n self.array_to_island()\n self.herbivores_on_island = None\n self.carnivores_on_island = None", "def __init__(self, island_map):\n self.island_map = island_map\n self.landscape_dict = {'M': Mountain,\n 'O': Ocean,\n 'J': Jungle,\n 'S': Savannah,\n 'D': Desert}", "def _standard_mapping(self):\n mapping_raw = scipy.io.loadmat(join(self.dataset_dir, 'scripts/mapping.mat'))\n self.camvidMap = mapping_raw['camvidMap'] * 255\n self.cityscapesMap = mapping_raw['cityscapesMap'] * 255", "def _makeimap(self):\n self.map_[\"source\"] = \"nasa\"\n self.map_[\"instrument\"] = \"goes\"\n self.map_[\"physobs\"] = \"irradiance\"\n self.map_[\"provider\"] = \"sdac\"", "def _makeimap(self):\n self.map_['source'] = 'GOES'\n self.map_['provider'] = 'NOAA'\n self.map_['instrument'] = 'SUVI'\n self.map_['physobs'] = 'flux'", "def initialize(self) -> None:\n self.simulation = self.initialize_simulation()\n width, height = get_window_resolution()\n display_dim = ((0, width), (0, height))\n self.coord_mapper = CoordinateMapper2D(*self.simulation.dim, *display_dim)\n self.simple_pygame.all_sprites.empty()\n self.initialize_visualization()", "def generate_map(self):\n\n # Create main streets first\n self.create_main_streets()\n\n # Then create the commercial buildings in the center of town\n self.create_commercial_center()\n\n # Then create the neighborhoods that populate the rest of the city\n while(self.create_neighborhood()):\n pass\n\n # Clean up any invalid buildings that were created\n self.delete_inaccessible_buildings()", "def setup(self):\n # TODO : figure out how to make the map interface a singleton class\n\n if not hasattr(self, 'mapInterface'):\n self.mapInterface = MapInterface(settings['FILE_CONFIG']['filename'])", "def _reset_map(self):\n if not self.scenario_name.startswith('random'):\n self._map = self._fixed_original_map.copy()\n else:\n from environment.scenarios import generate_random_map\n self._map = generate_random_map(self.scenario_name)\n\n # Precompute wall channel and positions since they are static\n self._walls_channel = (self._map == WALL).astype(int)\n xs, ys = np.where(self._walls_channel)\n self._wall_positions = list(zip(xs, ys))\n\n # Set avatar position bidirectional caches (first thieves then guardians)\n xs_t, ys_t = np.where(self._map == THIEF)\n xs_g, ys_g = np.where(self._map == GUARDIAN)\n xs = np.concatenate([xs_t, xs_g])\n ys = np.concatenate([ys_t, ys_g])\n for avatar_id, (x, y) in enumerate(zip(xs, ys)):\n self._id2pos[avatar_id] = x, y\n self._pos2id[(x, y)] = avatar_id\n\n self._chased_treasure_pos = _coords_where(self._map == TREASURE)\n self._chased_thief_id = 0", "def _makeimap(self):\n self.map_['source'] = 'NAOJ'\n self.map_['provider'] = 'NRO'\n self.map_['instrument'] = 'NORH'\n self.map_['phyobs'] = ''", "def _set_folium_map(self):", "def map_constructor(self):\n self.island_map = self.island_map.split('\\n')\n\n for n in range(len(self.island_map)):\n self.island_map[n] = \\\n [character for character in self.island_map[n]]\n\n self.construct_map_coordinates()", "def setUpClass(cls):\n cls.test_map = area.DungeonMap()", "def setUpClass(cls):\n cls.test_map = area.DungeonMap()", "def setUp(self):\n\n _gray_data = {'red': [(0., 0, 0), (1., 1.0, 
1.0)],\n 'green': [(0., 0, 0), (1., 1.0, 1.0)],\n 'blue': [(0., 0, 0), (1., 1.0, 1.0)]}\n\n self.colormap = ColorMapper.from_segment_map(_gray_data)\n self.colormap.range = DataRange1D()", "def setupMap(self) :\n\t\tself.Dmap = OnscreenImage(image = 'models/mapTopView.png', \\\n\t\t\t\t\t #pos = (.8,0,.6), scale = .4)\n\t\t\t\t\t pos = (0.8,0,0.6), scale = .4)\n\t\tself.Dmap.setTransparency(TransparencyAttrib.MAlpha)\n\t\tself.dot = OnscreenImage(image = 'models/dot.png', \\\n\t\t\t\t\t pos = (1,0,1), scale = .01)\n\n\t\t# Set the dot's position in the 2d map\n\t\t#self.dot.setPos(0,0,0)\n#\t\t 0.0+self.Dmap.getX(),0, \\\n#\t\t 0.0+self.Dmap.getY())\n\t#\t self.avatarNP.getX()/(self.modelSizeX+0.0+self.Dmap.getX()),0, \\\n\t#\t self.avatarNP.getY()/(self.modelSizeY+0.0+self.Dmap.getY()))\n\t\tself.dot.setPos( \\\n\t\t (self.avatarNP.getX()/(self.modelSizeX))*0.79+0.4, 0, \\\n\t\t (self.avatarNP.getY()/(self.modelSizeY))*0.79+0.21)\n\t\tself.dotOrigin = self.dot.getPos()", "def setUpClass(cls) -> None:\n cls.example_map: FeedlineMapCollection = get_default_map_collection()\n cls.existing_map_id: str = 'S17'\n cls.existing_feedline_nr: int = 0\n cls.not_existing_map_id: str = 'NULL'\n cls.not_existing_feedline_nr: int = -1", "def __init__(self, island_map=None, ini_pop=None, seed=None):\n if seed is not None:\n np.random.seed(seed)\n random.seed(seed)\n else:\n random.seed(1234)\n np.random.seed(987654)\n\n if island_map is None:\n island_map = \"\"\"OOOOOOO\n OJJSJJO\n OJSSSJO\n OJSMSJO\n OJSMSJO\n OJJJJJO\n OOOOOOO\"\"\"\n self.island_map = island_map\n self.island = Island(self.island_map)\n self.island.build_map()\n self.vis_steps = None\n self.img_steps = None\n self.years_sim = 0\n self.heat = None\n if ini_pop is None:\n ini_herbs = [{'loc': (3, 3),\n 'pop': [{'species': 'Herbivore',\n 'age': 5,\n 'weight': 20}\n for _ in xrange(150)]}]\n ini_carns = [{'loc': (3, 3),\n 'pop': [{'species': 'Carnivore',\n 'age': 5,\n 'weight': 20}\n for _ in xrange(40)]}]\n ini_pop = ini_herbs + ini_carns\n self.add_population(ini_pop)\n self.fig = None", "def initialize_undistortion_maps(self):\n\n new_camera_matrix, valid_roi = cv2.getOptimalNewCameraMatrix(\n self.camera_matrix, self.distortion_coefficients, self.image_size,\n 0)\n\n self.map1, self.map2 = cv2.initUndistortRectifyMap(\n self.camera_matrix, self.distortion_coefficients, None,\n new_camera_matrix, self.image_size, cv2.CV_16SC2)", "def reset_map(self):\n self.reset_world(self._filename)", "def initialize_default(self):\n self.initialize_navigation()\n self.initialize_viewport()", "def __init__(self, maps):\n self._maps = maps", "def setup(self):\n build_world.start_level(self)", "def setUp(self):\n super().setUp()\n self.grid, err = xyzgrid.XYZGrid.create(\"testgrid\")\n self.grid.add_maps(self.map_data)\n self.map = self.grid.get_map(self.map_data[\"zcoord\"])\n\n # output to console\n # def _log(msg):\n # print(msg)\n # self.grid.log = _log", "def setup(self):\n\n logger.info('Setting up SimulatedMaps module.')\n\n # Save the cls as a class attribute\n self.cls = self.read_cls()\n\n logger.info('Setup done!')", "def initMaps(self):\r\n assert isinstance(self.CLASSES, (list, tuple))\r\n assert self.CLASSES[0] == \"__background__\"\r\n cls = self.CLASSES\r\n self.name_to_id = dict(zip(cls, range(len(cls))))\r\n self.id_to_name = dict(zip(range(len(cls)), cls))", "def __init__(self, gameMap, initDirec=None, initBodies=None, initTypes=None):\n\t\tself._map = gameMap\n\t\tself._initDirec = initDirec\n\t\tself._initTypes = 
initTypes\n\t\tself._initBodies = initBodies\n\t\tself.reset(False)", "def initialise(self):\n self.set_up()", "def _set_folium_map(self):\n m = Map(features=[self], width=self._width, height=self._height)\n self._folium_map = m.draw()" ]
[ "0.7265102", "0.69269395", "0.6895909", "0.6665102", "0.6394695", "0.6318594", "0.6241702", "0.61914027", "0.6168343", "0.6157474", "0.6133331", "0.61102325", "0.60776055", "0.59733725", "0.59733725", "0.5957042", "0.5941652", "0.594105", "0.59341586", "0.59300625", "0.5903458", "0.5891098", "0.58755416", "0.5869557", "0.58415323", "0.5803546", "0.57359177", "0.5713932", "0.5700598", "0.56793576" ]
0.72836506
0
As in other examples, assigns all sprites in the "Obstacles" layer to be walls.
def setup_walls(self): self.wall_list = self.get_current_map().get_layer_by_name("Obstacles").sprite_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_walls(self):\n self.wall_list = self.get_current_map().get_layer_by_name(\"walls\").sprite_list", "def add_walls(self):\n for x in range(self.width):\n self.add_thing(Wall(), (x, 0))\n self.add_thing(Wall(), (x, self.height - 1))\n\n for y in range(self.height):\n self.add_thing(Wall(), (0, y))\n self.add_thing(Wall(), (self.width - 1, y))", "def add_walls(self):\n for x in range(self.width + 1):\n if not self.some_things_at((x, 0), Wall):\n self.add_thing(Wall(), (x, 0))\n if not self.some_things_at((x, self.height), Wall):\n self.add_thing(Wall(), (x, self.height))\n\n for y in range(self.height + 1):\n if not self.some_things_at((0, y), Wall):\n self.add_thing(Wall(), (0, y))\n if not self.some_things_at((self.width, y), Wall):\n self.add_thing(Wall(), (self.width, y))\n #self.add_thing(Wumpus(),(1,3))\n #self.add_thing(Pit(),(3,3))\n #self.add_thing(Pit(),(3,1))\n #self.add_thing(Gold(),(2,3))\n #self.add_thing(Pit(),(4,4))", "def create_wall():\n if config.W_LIST == []:\n pos = randint(config.M.x_pos+4, common.R2)\n if common.value_arr(pos, common.MIDS_R) == \" \" and \\\n common.value_arr(pos, common.MIDS_R+1) == \"0\":\n try:\n witem = obstacle.Wall(pos)\n config.W_LIST.append(witem)\n except config.GapHere:\n pass\n\n elif len(config.W_LIST) < int((3*common.COLS)/80):\n if randint(0, 10) == 5:\n # create a obstacle\n pos = config.W_LIST[-1].x_pos + randint(10, 20)\n if pos < common.COLS - 3:\n try:\n witem = obstacle.Wall(pos)\n config.W_LIST.append(witem)\n except config.GapHere:\n pass\n\n else:\n pass", "def __init__(self):\n self.wall_list = pygame.sprite.Group()\n self.enemy_sprites = pygame.sprite.Group()", "def __init__(self, width, height, walls = None):\r\n self.width = width\r\n self.height = height\r\n if walls:\r\n self.walls = walls\r\n else:\r\n self.walls = []\r\n self.goals = []\r\n self.tiles = []\r\n self._clear_map()", "def _draw_walls(self, draw_grid):\n for yi, y in enumerate(self._grid):\n for xi, x in enumerate(y):\n for i, w in enumerate(x.walls):\n if i == 0 and w:\n draw_grid[yi * 2 + 1][xi * 2] = self._wall_color\n if i == 1 and w:\n draw_grid[yi * 2 + 1][xi * 2 + 2] = self._wall_color\n if i == 2 and w:\n draw_grid[yi * 2][xi * 2 + 1] = self._wall_color\n if i == 3 and w:\n draw_grid[yi * 2 + 2][xi * 2 + 1] = self._wall_color\n return draw_grid", "def make_boundary_wall(self, height, width) -> None:\n for x in range(0, width):\n Wall(self, x, 0)\n Wall(self, x, height - 1)\n for y in range(1, height - 1):\n Wall(self, 0, y)\n Wall(self, width - 1, y)", "def _makeWall(self, x, y):\n\t\tif self._isEmpty(x, y):\n\t\t\tself.setWall(x, y)\n\t\t\tif (x, y) not in self._walls:\n\t\t\t\tself._walls.append((x, y))", "def draw_obstacles(self):\n for obstacle in self.obstacles:\n obstacle.draw(self.window, Colors.BLACK.value)", "def reset_obstacles(self):\n self.obstacles = np.array([])", "def __init__(self, i, j):\n pygame.sprite.Sprite.__init__(self)\n #self.image = pygame.Surface([30,30])\n #self.image.fill(self.wallColor)\n self.image = pygame.image.load('stone_wall.png').convert_alpha()\n self.pos = (i*30,j*30,)\n self.rect = pygame.Rect(i*30,j*30,30,30)\n self._layer = 2", "def getInitialObstacles():\n # hardcode number of blocks\n # will account for movemnet\n from random import choice\n from globals import TILEWIDTH, TILEHEIGHT, WINHEIGHT, TILEFLOORHEIGHT, LEVEL, HALFWINWIDTH\n\n no_of_blocks = 50\n for b in range(no_of_blocks // 2):\n # get image\n # image = globals.IMAGESDICT['rock']\n for y in range(1,5):\n image = 
globals.IMAGESDICT[choice(['ugly tree', 'rock', 'tall tree'])]\n # make rect\n spaceRect = pygame.Rect((b * TILEWIDTH, y * TILEFLOORHEIGHT, TILEWIDTH, TILEFLOORHEIGHT))\n landscape = Landscape(image, spaceRect)\n allLandscapeList.add(landscape)\n allSpriteList.add(landscape)\n\n image = globals.IMAGESDICT['corner']\n negativeRect = pygame.Rect([-150, WINHEIGHT - TILEHEIGHT, TILEWIDTH, TILEHEIGHT])\n landscape = Landscape(image, negativeRect)\n allLandscapeList.add(landscape)\n allSpriteList.add(landscape)\n\n image = globals.IMAGESDICT['corner']\n positiveRect = pygame.Rect([LEVEL[0] - TILEWIDTH, WINHEIGHT - TILEHEIGHT, TILEWIDTH, TILEFLOORHEIGHT])\n landscape = Landscape(image, positiveRect)\n allLandscapeList.add(landscape)\n allSpriteList.add(landscape)\n\n bottomRect = pygame.Rect([HALFWINWIDTH, LEVEL[1] - TILEHEIGHT, TILEWIDTH, TILEFLOORHEIGHT])\n landscape = Landscape(image, bottomRect)\n allLandscapeList.add(landscape)\n allSpriteList.add(landscape)\n\n for x in range(0, LEVEL[0], 50):\n for y in range(10):\n image = globals.IMAGESDICT[choice(['ugly tree', 'rock', 'tall tree'])]\n spaceRect = pygame.Rect((x, LEVEL[1] - (y * TILEHEIGHT), TILEWIDTH, TILEFLOORHEIGHT))\n landscape = Landscape(image, spaceRect)\n if choice([0,1,0]):\n allLandscapeList.add(landscape)\n allSpriteList.add(landscape)\n\n\n return", "def spawn_obstacles(self):\n self.obstacle_sprites.empty()\n number_of_obstacles = random.randint(MIN_OBSTACLES, MAX_OBSTACLES)\n while len(self.obstacle_sprites) < number_of_obstacles:\n obstacle = Obstacle(random.randrange(0, WIDTH), random.randrange(HEIGHT - 500, HEIGHT))\n obstacle_collision = pygame.sprite.spritecollide(obstacle, self.obstacle_sprites, False)\n if not obstacle_collision:\n self.obstacle_sprites.add(obstacle)", "def walls(self, walls):\n\n self.container['walls'] = walls", "def draw_obstacles():\n for obstacle in obstacles:\n plt.gca().add_patch(obstacle)", "def recreate_obstacles(self):\n self.board_matrix = np.full(Dimension.board_size(), 1)\n self.obstacles = self.create_obstacles()", "def south_wall(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n swall = SolidObject(name+str(wallnum), Size(length, height, 1), Position(x, y+height / 2, s), 0)\r\n self.walls.append(swall)\r\n model = Plane(w=swall.w()*2, h=swall.h()*2, name=name+str(wallnum))\r\n mergeshape.add(model, swall.x(),swall.y(),swall.z(), rx=0.0,ry=0.0,rz=0.0)\r\n\r\n wallnum += 1", "def create_wall_shape(self):\n self.shape_walls = arcade.ShapeElementList()\n self.shape_walls.center_x = 0\n self.shape_walls.center_y = 0\n self.shape_walls.angle = 0\n\n point_list = []\n color_list = []\n \n # create the walls into a single shape\n walls = self.game.walls\n for wall in walls:\n points = self.get_entity_dimensions(wall)\n point_list.append(points[0])\n point_list.append(points[1])\n point_list.append(points[2])\n point_list.append(points[3])\n \n # as we have 4 points\n for i in range(4):\n color_list.append(COLOUR_MAP[wall.base_colour])\n \n self.shape_walls.append(\n arcade.create_rectangles_filled_with_colors(point_list, color_list)\n )", "def place_obstacles():\n #Randomly generate different sized rectangles\n #Soem may overlap, which gives more variety in shape of obstacles\n xvals = np.random.randint(0,self.map_dimensions[1],size=self.N_obstacles)\n yvals = np.random.randint(0,self.map_dimensions[0],size=self.N_obstacles)\n 
lower_left = zip(xvals,yvals)\n rects = []\n for LL in lower_left:\n x = LL[0]\n y = LL[1]\n wmax = self.map_dimensions[1] - x\n w = np.random.randint(0,wmax,size=1)[0]\n hmax = self.map_dimensions[0] - y\n h = np.random.randint(0,hmax,size=1)[0]\n rects += [(x,y,w,h)]\n self.coordinates__obstacles = rects", "def create_outer_walls(space,width,height):\n static_lines = [pymunk.Segment(space.static_body, (0.0, 0.0), (width, 0.0), 0.0),\n pymunk.Segment(space.static_body, (width, 0.0), (width, height), 0.0),\n pymunk.Segment(space.static_body, (width, height), (0.0, height), 0.0),\n pymunk.Segment(space.static_body, (0.0, 600.0), (0.0, 0.0), 0.0)]\n for line in static_lines:\n line.friction = 0.5\n line.elasticity = 0.9\n\n return static_lines", "def get_walls(world):\r\n return set(((x,y) for x in range(world.get_width()) for y in range(world.get_height()) if world.is_wall((x,y))))", "def isWall(mapObj, x, y):\n if x < 0 or x >= len(mapObj) or y < 0 or y >= len(mapObj[x]):\n return False # x and y aren't actually on the map.\n elif mapObj[x][y] in ('#', 'x'):\n return True # wall is blocking\n return False", "def north_wall(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n nwall = SolidObject(name+str(wallnum), Size(length, height, 1), Position(x, y + height / 2, n), 0)\r\n self.walls.append(nwall)\r\n model = Plane(w=nwall.w()*2, h=nwall.h()*2, name=name+str(wallnum))\r\n mergeshape.add(model, nwall.x(), nwall.y(), nwall.z())\r\n\r\n\r\n wallnum += 1", "def updateHardObstacles(self):\r\n global_obs = self.calcGlobalObstaclePosition([[10, 20],[10, 0],[10, -20]])\r\n self.globalHardObstaclesList.extend(global_obs)", "def setup_maps(self):\n super().setup_maps()\n sprite_classes = {\n \"walls\": Wall,\n \"play\": Background,\n \"exit\": Background,\n }\n island_map = TiledMap((\"images/qwerty_game_1.tmx\"), sprite_classes)\n self.add_map(island_map)", "def create_and_add_horiontal_walls_to_list(row_start: int, row_end: int, y: int, wall_list: arcade.SpriteList) -> None:\n #loop creation of wall sprites\n for x in range(row_start * wall_size, row_end * wall_size, wall_size):\n wall = arcade.Sprite(\":resources:images/tiles/boxCrate_double.png\", wall_scaling)\n wall.left = x\n wall.bottom = y * wall_size\n wall_list.append(wall)", "def make_board(self):\n http = urllib3.PoolManager()\n r = http.request('GET', 'http://www.cse.msu.edu/~ruppmatt/itm891/tiles.pickle')\n tiles = pickle.loads(r.data)\n self.assets = tiles\n self.gameboard = Image.new('RGBA', (64*(self.world_width+2), 64*(self.world_height+2)))\n # Laydown land\n for c in range(0,self.world_width):\n for r in range(0, self.world_height):\n x = (c+1)*64\n y = (r+1)*64\n tile_ndx = np.random.choice(len(tiles['land']))\n self.gameboard.paste(tiles['land'][tile_ndx], (x,y)) \n # Laydown water\n for c in range(0,self.world_width):\n x = (c+1)*64\n yy = (self.world_height+1)*64\n self.gameboard.paste(tiles['water']['edge_north'], (x,0))\n self.gameboard.paste(tiles['water']['edge_south'], (x, yy))\n for r in range(0,self.world_height):\n y = (r+1)*64\n xx = (self.world_width+1)*64\n self.gameboard.paste(tiles['water']['edge_west'], (0,y))\n self.gameboard.paste(tiles['water']['edge_east'], (xx,y))\n self.gameboard.paste(tiles['water']['corner_nw'], (0,0))\n self.gameboard.paste(tiles['water']['corner_sw'], (0,(self.world_height+1)*64))\n 
self.gameboard.paste(tiles['water']['corner_ne'], ((self.world_width+1)*64,0))\n self.gameboard.paste(tiles['water']['corner_se'], ((self.world_width+1)*64,(self.world_height+1)*64))\n \n # Some land lines\n draw = ImageDraw.Draw(self.gameboard)\n for c in range(0,self.world_width-1):\n y_1 = 64\n y_2 = 64*(self.world_height+1)\n x = (2+c)*64\n draw.line([(x,y_1),(x,y_2)], fill='white', width=1)\n for r in range(0,self.world_height-1):\n y = (2+r)*64\n x_1= 64\n x_2 = 64 * (self.world_width+1)\n draw.line([(x_1,y),(x_2,y)], fill='white', width=1)\n return", "def reset(self) -> None:\n self.map = []\n for col in range(self.width):\n self.map.append([])\n for cell in range(self.height):\n if col > 1 and col < self.width - 2:\n if cell == 0:\n # World Barrier - Top Middle\n self.map[col].append(StaticTile('wall_3', self.graphicsLibrary.get('wall_3'), (self.scaleWidth,self.scaleHeight), barrier=True))\n elif cell == self.height - 1:\n # World Barrier - Bottom Middle\n self.map[col].append(StaticTile('wall_12', self.graphicsLibrary.get('wall_12'), (self.scaleWidth,self.scaleHeight), barrier=True))\n else:\n # Playable Map Area\n if (col % 2) != 0 and (cell % 2) == 0:\n # Hard-Barrier Generation\n self.map[col].append(StaticTile('solid', self.graphicsLibrary.get('solid'), (self.scaleWidth,self.scaleHeight), barrier=True))\n elif (col,cell) in self.spawn_buffers:\n # Preserve Potential Spawn Points\n self.map[col].append(StaticTile('terrain', self.graphicsLibrary.get('terrain'), (self.scaleWidth,self.scaleHeight), barrier=False))\n elif random.randint(0, 2) == 0:\n # Soft-Barrier Generation\n self.map[col].append(DynamicTile('destructable_new', self.graphicsLibrary.get('destructable_new'), (self.scaleWidth,self.scaleHeight), destructable=\"True\", barrier=True, death_animation=self.animations_library.get('destructable_death')))\n else:\n # Fill Remaining Terrain\n self.map[col].append(StaticTile('terrain', self.graphicsLibrary.get('terrain'), (self.scaleWidth,self.scaleHeight), barrier=False))\n else:\n # World Barrier - Side Sections\n if col == 0 or col == self.width - 1:\n # Roof\n right_most_columns = False\n if col == self.width - 1:\n right_most_columns = True\n\n if cell == self.height - 1:\n self.map[col].append(StaticTile('wall_10', self.graphicsLibrary.get('wall_10'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == self.height - 2:\n self.map[col].append(StaticTile('wall_1', self.graphicsLibrary.get('wall_1'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 0:\n self.map[col].append(StaticTile('wall_1', self.graphicsLibrary.get('wall_1'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n else:\n self.map[col].append(StaticTile('wall_5', self.graphicsLibrary.get('wall_5'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif col == 1 or col == self.width - 2:\n # Floor \n right_most_columns = False\n if col == self.width - 2:\n right_most_columns = True\n\n if cell == self.height -1:\n self.map[col].append(StaticTile('wall_11', self.graphicsLibrary.get('wall_11'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == self.height - 2:\n self.map[col].append(StaticTile('wall_9', self.graphicsLibrary.get('wall_9'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 0:\n self.map[col].append(StaticTile('wall_2', self.graphicsLibrary.get('wall_2'), 
(self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 1:\n self.map[col].append(StaticTile('wall_6', self.graphicsLibrary.get('wall_6'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n else:\n self.map[col].append(StaticTile('wall_7', self.graphicsLibrary.get('wall_7'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n self.map[col][cell].place_at(topleft=(self.scaleWidth * col, self.scaleHeight * cell))", "def set_channel_walls(self,walls=['left','right','top','bottom']):\n solid_list_a = np.empty(0).flatten()\n solid_list_b = np.empty(0).flatten()\n solid_list_c = np.empty(0).flatten()\n solid_list_d = np.empty(0).flatten()\n\n for w in walls:\n if w=='right':\n solid_list_a = np.array(np.where((self.x==0.))).flatten()\n elif w=='left':\n solid_list_b = np.array(np.where((self.x == self.Lx_p))).flatten()\n elif w=='top':\n solid_list_d = np.array(np.where((self.y == self.Ly_p))).flatten()\n elif w=='bottom':\n solid_list_c = np.array(np.where((self.y == 0.))).flatten()\n\n solid_list = np.array(np.union1d(solid_list_a,solid_list_b)); \n solid_list = np.array(np.union1d(solid_list,solid_list_c))\n self.solid_list = np.array(np.union1d(solid_list,solid_list_d))" ]
[ "0.7613916", "0.67729795", "0.6738331", "0.63915414", "0.626012", "0.6249966", "0.62025875", "0.6169142", "0.61558133", "0.61539805", "0.60857564", "0.6060428", "0.60447943", "0.60199165", "0.6016821", "0.6001422", "0.59767026", "0.5943057", "0.59379256", "0.5924689", "0.58118486", "0.5799548", "0.5797187", "0.5795116", "0.5783052", "0.5774154", "0.57728386", "0.57692116", "0.57597893", "0.57507783" ]
0.81163895
0
wraps a string within single quotes. Mostly needed for insert into database
def wrap_with_in_single_quote(s): return "'{}'".format(s)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def qstring(self, s):\n\n if '\"' in s or ' ' in s or '\\\\' in s:\n return '\"' + s.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"') + '\"'\n else:\n return s", "def quote(s):\n if isinstance(s, str):\n if \" \" in s or len(s.split()) > 1:\n start, end = s[0], s[-1]\n if start != end or start not in ('\"', \"'\"):\n q1s, q1d, q3s, q3d = \"'\", '\"', 3 * \"'\", 3 * '\"'\n if q1d not in s:\n s = q1d + s + q1d\n elif q1s not in s:\n s = q1s + s + q1s\n elif q3d not in s:\n s = q3d + s + q3d\n elif q3s not in s:\n s = q3s + s + q3s\n return s", "def quot(string):\r\n return string.replace('\"', \"'\")", "def SingleQuote(s):\n return pipes.quote(s)", "def urlQuote(string):\r\n return quote(string.encode(\"utf-8\"))", "def quote(s):\n\n\ts = \"'\" + s.replace(\"'\", \"\"\"'\"'\"'\"\"\") + \"'\"\n\n\t#get rid of gratuitous leading and trailing empty strings\n\tif s.startswith(\"''\"): s = s[2:]\n\tif s.endswith(\"''\"): s = s[:-2]\n\n\treturn s", "def standardise_quotes(self, val):\n if val.startswith(self.altquote) and val.endswith(self.altquote):\n middle = val[1:-1]\n val = \"%s%s%s\" % (self.quote, middle, self.quote)\n\n val = self.escape_quotes(val)\n\n return val", "def shQuote(text):\n\treturn \"'%s'\" % text.replace(\"'\", r\"'\\''\")", "def _quote(v):\n return '\"' + v + '\"' if ' ' in v else v", "def quote(s):\n if not s:\n return \"''\"\n if _find_unsafe(s) is None:\n return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def _escapeString(self, value):\n if '\"' in value and \"'\" in value:\n substrings = value.split(\"\\\"\")\n result = [\"concat(\"]\n for substring in substrings:\n result.append(\"\\\"%s\\\"\" % substring)\n result.append(\", '\\\"', \")\n result = result[0:-1]\n if value.endswith('\"'):\n result.append(\", '\\\"'\")\n return \"\".join(result) + \")\"\n\n if '\"' in value:\n return \"'%s'\" % value\n return \"\\\"%s\\\"\" % value", "def encodeLiteral(self, string):\r\n return string.replace(\"'\",\"''\")", "def _quote(self, arg):\n arg = arg.replace('\\\\', '\\\\\\\\')\n arg = arg.replace('\"', '\\\\\"')\n return '\"%s\"' % arg", "def elimenate_quote(string):\n\n for i, c in enumerate(string):\n if i==0:\n begin = c\n end = c \n \n if begin == '\"' and end == '\"':\n return string[1:-1]\n if begin == \"'\" and end == \"'\":\n return string[1:-1] \n \n else:\n return string", "def quoted(val: str) -> str:\n return f'\"{val}\"' if ' ' in val else val", "def mysql_quote(x):\n if not x:\n return \"NULL\"\n x = x.replace(\"\\\\\", \"\\\\\\\\\")\n x = x.replace(\"'\", \"''\")\n x = x.replace(\"\\n\", \"\\\\n\")\n return \"'{}'\".format(x)", "def quote(s):\n # Based on shlex.quote. 
Bun unlike shlex, it quotes every string and\n # not just the ones that contain unsafe characters.\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def embeded_triple_quotes():\n pass", "def html_dq_safe(string):\n if not string:\n return string\n string = string.replace('\"','&quot;')\n return string", "def escape_quotes(self, val):\n if val.startswith(self.quote) and val.endswith(self.quote):\n # make sure any previously escaped quotes are not re-escaped\n middle = val[1:-1].replace(\"\\\\\" + self.quote, self.quote)\n middle = middle.replace(self.quote, \"\\\\\" + self.quote)\n val = \"%s%s%s\" % (self.quote, middle, self.quote)\n\n return val", "def quoted_string(content: str) -> str:\n if not (QCONTENT > set(content)):\n raise ValueError(f\"bad content for quoted-string {content!r}\")\n return not_qtext_re.sub(lambda x: \"\\\\\" + x.group(0), content)", "def quote(s):\n if not s:\n return \"''\"\n if _find_unsafe(s) is None:\n return \"'\" + s + \"'\"\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def quoteString(s):\n if s is None:\n return None\n quoted = str(s).replace(\"\\\\\", \"\\\\\\\\\").replace(\"'\", \"\\\\'\")\n return \"'{}'\".format(quoted)", "def _escape_string(s, surrounding_quote='\"'):\n s = s.replace('\\\\', '\\\\\\\\')\n if surrounding_quote == '\"':\n s = s.replace('\"', r'\\\"')\n if surrounding_quote == \"'\":\n s = s.replace(\"'\", r\"\\'\")\n return s", "def argument_quote(argument):\n argument = argument.replace('\"', '\"\"')\n if ' ' in argument:\n argument = argument.replace(\"'\", \"''\")\n argument = \"'\" + argument + \"'\"\n return argument", "def cleaned_string(val):\r\n return urllib.quote_plus(smart_str(val))", "def quote(value):\n single = value.find(\"'\")\n double = value.find('\"')\n multiline = value.find('\\n') != -1\n if multiline or ((single != -1) and (double != -1)):\n if value.find('\"\"\"') == -1 and value[0] != '\"' and value[-1] != '\"':\n s = '\"\"\"%s\"\"\"' % value\n else:\n s = \"'''%s'''\" % value\n elif (single != -1) and (double == -1):\n s = '\"%s\"' % value\n else:\n s = \"'%s'\" % value\n return s", "def quoted(string, every=64):\n return \"> \" + re.sub(\"\\r\\n(?=[^\\r\\n])\", \"\\r\\n> \", string)", "def _quoter(self, col) :\n\n j = self.cols.index(col)\n if self.types[j] == 'TEXT' :\n return '\"%s\"'\n else :\n return '%s'", "def shellquote(arg):\n if re.match('^[-_.:/=a-zA-Z0-9]*$', arg):\n return arg\n else:\n return \"'%s'\" % arg.replace(\"'\", r\"'\\''\")" ]
[ "0.7357636", "0.70734096", "0.6990583", "0.69030076", "0.67781955", "0.66734624", "0.6578122", "0.6562084", "0.65618634", "0.65006495", "0.6492088", "0.6490486", "0.64646506", "0.64400494", "0.64112735", "0.6396136", "0.6389765", "0.638212", "0.6373073", "0.6366212", "0.6354275", "0.6338517", "0.6316134", "0.6310962", "0.6300352", "0.62466913", "0.6239381", "0.6237017", "0.61897874", "0.61808807" ]
0.7279222
1
returns md5 hashed password
def get_hashed_value(password): salt = 'saifulBoss' password = salt + password return md5(password.encode('utf-8')).hexdigest()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hash_password(password):\n return hashlib.md5(password).hexdigest()", "def _create_md5(self, password) -> str:\n md5_hash = hashlib.md5(password.encode(\"utf-8\")).hexdigest()\n self.logger.debug(\"created md5 hash: %s\", md5_hash)\n\n return md5_hash", "def to_hash(password):\n return \"{MD5}%s\" % base64.encodebytes(\n hashlib.md5(str(password).encode()).digest()\n ).strip().decode()", "def hash_password(password):\n password_md5 = hashlib.md5(password.encode('utf-8')).hexdigest()\n for i in range(0, len(password_md5), 2):\n if password_md5[i] == '0':\n password_md5 = password_md5[0:i] + 'c' + password_md5[i + 1:]\n return password_md5", "def hash_password(password):\n salt = hashlib.md5(password.encode())\n return salt.hexdigest()", "def __md5_hash(txt) -> str:\n\n return md5_crypt.hash(txt)", "def get_correct_pw_md5():\n f = open(PASSWORD_FILE, 'r')\n pw_md5 = f.read().strip()\n f.close()\n return pw_md5", "def md5hash(string):\n return hashlib.md5(string).hexdigest()", "def get_md5(text):\n return hashlib.md5(text).hexdigest()", "def __hash_md5__(self, text):\n key = hashlib.md5()\n key.update(text.encode('utf-8'))\n return key.digest()", "def _md5(input):\n m = hashlib.md5()\n m.update(input)\n return m.hexdigest()", "def md5hash(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"md5hash\")", "def get_md5(s):\n m = hashlib.md5()\n m.update(s.encode('utf8'))\n return m.hexdigest()", "def digest_auth_md5(qop=None, user=\"user\", passwd=\"passwd\"):\n return digest_auth(qop, user, passwd, \"MD5\", \"never\")", "def secret_hash(data):\n\n passwords_hash = hashlib.md5(data.encode(\"UTF-8\")).hexdigest()\n \n return passwords_hash", "def calc_md5(string):\n\treturn md5(string).hexdigest()", "def md5hash(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"md5hash\")", "def getpassword(value):\n hashed = \"%s%s\" % (value, SECRET_KEY)\n hasher = hashlib.md5()\n hasher.update(hashed)\n return hasher.hexdigest()[-8:]", "def md5(input_string):\n return hashlib.md5(input_string.encode('utf-8')).hexdigest()", "def _md5sum(data):\n hash = hashlib.md5()\n hash.update(six.b(data))\n hash_hex = hash.hexdigest()\n return hash_hex", "def hash(self, string):\n h = md5()\n h.update(string)\n return h.digest()", "def MD5(self) -> _n_0_t_3[_n_0_t_9]:", "def md5(string: str) -> str:\n\treturn str(hashlib.md5(string.encode()).hexdigest())", "def md5(val):\n return hashlib.md5(val).hexdigest()", "def get_hash(s):\n hash_object = hashlib.md5(s.encode())\n return hash_object.hexdigest()", "def hash_string(password):\n return hash(password)", "def digest(string):\n return md5(string.encode(\"utf-8\")).hexdigest()", "def md_5_hash(i):\n h = hashlib.md5(i.encode('utf-8')).hexdigest()\n return h", "def compute_md5_for_string(string):\n return base64.b64encode(hashlib.md5(string).digest())", "def generate_password_hash(event=None, user_id=None):\n\n suffix_key = f'password{event}'\n hexkey = str.encode(f'{user_id}{suffix_key}')\n\n # md5 value[1:10] + 1\n passwd = '{0}{1}'.format(hashlib.md5(hexkey).hexdigest()[1:10], 1)\n\n return passwd" ]
[ "0.85117745", "0.8031083", "0.7865687", "0.7858549", "0.78492856", "0.77523303", "0.7710148", "0.75820285", "0.7581162", "0.750511", "0.74373454", "0.7422173", "0.7403926", "0.7369566", "0.7347021", "0.7316815", "0.7263541", "0.7245339", "0.7239279", "0.7191727", "0.71888846", "0.718886", "0.7168363", "0.7164048", "0.71385264", "0.70974404", "0.7065541", "0.70596313", "0.7050113", "0.7020699" ]
0.80907196
1
Delete the `n`th element from the supplied list `l` and return the resulting list.
def pop_and_return(l, n): o = l.copy() if (n >= len(l)) or (n < 0): raise ValueError("Index n = %d out of range" % (n)) if len(l) == 0: raise ValueError("The supplied list must contain at least one element.") del o[n] return o
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drop(lst, n): # noqa: N805\n for _ in range(n):\n try:\n lst = lst.tail\n except AttributeError:\n break\n return lst", "def del_from_list(l, elements, count=0):\n return _remove_elements_from_list(l, elements, None, count)", "def drop(iterable, n, islice=islice):\n return islice(iterable, n, None)", "def rotate(l: list, n: int) -> list:\n return l[-n:] + l[:-n]", "def drop(n, seq):\n return itertools.islice(seq, n, None)", "def RotateList(p_l: list, p_n: int):\n return p_l[p_n:] + p_l[:p_n]", "def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n dummy = ListNode(0)\n dummy.next = head\n first = dummy\n second = dummy\n\n for i in range(n + 1):\n first = first.next\n\n while first:\n first = first.next\n second = second.next\n\n second.next = second.next.next\n\n return dummy.next", "def nth(_list, n):\n n = lloc(_list, n)\n return [a[n] for a in _list]", "def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n\n if not head or not head.next:\n return None\n\n first_pointer = head\n second_pointer = head\n for i in range(n):\n second_pointer = second_pointer.next\n if not second_pointer:\n return head.next\n\n while second_pointer.next:\n first_pointer = first_pointer.next\n second_pointer = second_pointer.next\n first_pointer.next = first_pointer.next.next\n\n return head", "def split_list(l, n):\n n *= 2\n returned_list = [l[i: i + n] for i in range(0, len(l), n)]\n return returned_list", "def drop(iterable, n):\n counter = 0\n for element in iterable:\n if counter < n:\n counter += 1\n else:\n yield element", "def rotate(l, n=1):\n return l[n:] + l[:n]", "def delete_element(some_list, index):\n del some_list[index]\n return some_list", "def remove_from_list(self,list_,index):\r\n try:\r\n return list_.pop(self._index_to_int(index))\r\n except IndexError:\r\n self._index_error(list_,index)", "def chunks(l, n):\n if len(l) % n != 0:\n raise Exception('List length is not a multiple on %s', n)\n return [l[i:i+n] for i in range(0, len(l), n)]", "def rotate(l, n):\n return l[n:] + l[:n]", "def rotate(l, n):\n return l[n:] + l[:n]", "def partition(lis: list, n: int):\n # prevent destroying the original dataset\n lis_cp = copy.deepcopy(lis)\n random.shuffle(lis_cp)\n if len(lis) > n:\n return [lis_cp[i::n] for i in range(n)]\n else:\n return [[lis_cp[i]] for i in range(len(lis))]", "def split_list(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i + n]", "def nth_smallest_element(input_list, n):\n\tif n <= 0:\n\t\traise Exception(\"Invalid argument\")\n\n\tdeduped_sorted_list = list(sorted(set(input_list)))\n\treturn deduped_sorted_list[n - 1]", "def remove_elements(l, e):\n return [x for x in l if x != e]", "def chunk(l, n=500):\n return [l[i:i+n] for i in range(0, len(l), n)]", "def mutate_list_1(lst, size):\r\n count = 0\r\n while count < size:\r\n elem = lst[0]\r\n lst.remove(elem)\r\n lst.append(elem)\r\n count += 1\r\n return lst", "def split_list_into_sublists_of_size_n(lst, n):\n return [lst[i : i + n] for i in range(0, len(lst), n)]", "def rm(x, l):\n return [y for y in l if x != y]", "def chunk(lst, n):\n return [lst[i:i + n] for i in range(0, len(lst), n)]", "def take(self, n): # noqa: N805\n return List(_islice(self, n))", "def partition(self, lst, n):\n division = len(lst) / float(n)\n return [lst[int(round(division * i)): int(round(division * (i + 1)))] for i in xrange(n)]", "def delete_at_index(self, index: int) -> T:\n try:\n previous_node = self.__get_node_at_index(index-1)\n except ValueError as e:\n if self.is_empty(): \n raise 
ValueError(\"List is empty\")\n elif index == 0:\n item = self.head.items\n self.head = self.head.link\n else:\n raise e\n else:\n item = previous_node.link.items\n previous_node.link = previous_node.link.link\n self.length -= 1\n return item", "def pop(self, n):\n try:\n self._load(False)\n except KeyError:\n return\n\n # Delete the items we no longer need,\n # and most importantly decrease self.count\n key = (self.head - self.count) % self.size\n while n > 0 and self.count > 0:\n del self.db[key]\n key += 1\n if key == self.size:\n key = 0\n n -= 1\n self.count -= 1\n self.db['count'] = self.count" ]
[ "0.76152366", "0.68506825", "0.68460673", "0.6674738", "0.6519919", "0.6411165", "0.6317477", "0.6257754", "0.6228628", "0.61837226", "0.6085985", "0.6027428", "0.60008734", "0.59967196", "0.5925724", "0.5918213", "0.5918213", "0.59132695", "0.5902579", "0.58495694", "0.58382547", "0.58185476", "0.57872653", "0.57854605", "0.5770071", "0.5759419", "0.5750788", "0.5748499", "0.57459253", "0.57449424" ]
0.78068995
0
Override this to do any service-specific initialization
def initService(self):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_init(self):\n\n pass", "def init_services(self):\n service_prefix = rospy.get_name() + \"/\"\n\n self._request_components_serv = rospy.Service(service_prefix +\n 'list_components',\n ListComponents,\n self.get_components)\n self._request_fields_serv = rospy.Service(service_prefix +\n 'list_fields',\n ListFields,\n self.get_fields)\n self._request_values_serv = rospy.Service(service_prefix +\n 'request_values',\n RequestValues,\n self.get_values)\n self._unsubscribe_values_serv = rospy.Service(service_prefix +\n 'unsubscribe_values',\n UnsubscribeValues,\n self.unsubscribe_values)", "def _manually_initialize(self) -> None:\n # XXX: maybe refactor, this is actually part of the public interface\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _real_initialize(self):\n pass", "def _init(self, options):\n self._initRuntime(options)\n self._loadConfig() # needs runtime\n self._initGeneral() # needs _config\n self._initGroups() # needs _config and general", "def _setup(self):\n raise NotImplementedError()", "def _init(self):\n pass", "def Initialize(self, deps):\n raise NotImplementedError", "def initialize(self, *args, **kwargs):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def setup_provider(self):\n pass", "def _initialize(self, **kwargs):\n return None", "def init():", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):" ]
[ "0.7318107", "0.721846", "0.70419437", "0.6983712", "0.6983712", "0.6983712", "0.6966745", "0.6950469", "0.69004095", "0.6882781", "0.6850522", "0.6740089", "0.6729622", "0.6729622", "0.6729622", "0.6729622", "0.6729622", "0.6729622", "0.6729622", "0.6729622", "0.6727437", "0.672426", "0.672013", "0.671781", "0.671781", "0.671781", "0.671781", "0.671781", "0.671781", "0.671781" ]
0.8269538
0
Returns a task for the given class `name` or type, or None.
def getTask(self, name): for t in self.tasks: if isinstance(name, str): if t.name == name: return t else: if t.__class__ is name: return t return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_task(self, name):\n res = Task()\n self.GetTask(name, res)\n return res", "def task_by_type(qs, task_type):\n for task in qs:\n if task.type == task_type:\n return task\n return None", "def get_task(self, u_name):\n raise NotImplementedError()", "def task(self, name):\n if name not in self._tasks:\n raise TaskNotFoundError\n\n return self._tasks[name]", "def find_task_class(task_module):\n\n task = None\n\n for obj_key, obj_value in task_module.__dict__.items():\n\n if obj_key in BASE_TASK_CLASSES:\n continue\n elif hasattr(task_module.__dict__[obj_key], '__bases__'):\n if task_module.__dict__[obj_key].__bases__[0] in [Task]:\n task = task_module.__dict__[obj_key]\n break\n\n return task", "def _get_cls(name, cls):\n return cls.get(name, None) if isinstance(cls, dict) else cls", "def get_task_by_name(self, task_name):\n for task in self.tasks:\n if task.name == task_name:\n logger.debug(\"Returning task with name '%s': '%s'\", task_name, task.to_xml_string())\n return task\n raise ValueError(\"A step task with the name {} can not be found.\".format(task_name))", "def get_by_name(task_name):\n return tasks.find_one({'name': task_name})", "def get_one_task_by_name(self, name: str) -> \"Task\": # noqa: F821\n tasks = self.get_tasks_by_name(name)\n if not tasks:\n raise PyDSTaskNoFoundException(f\"Can not find task with name {name}.\")\n return tasks.pop()", "def get_task(self, id=None, name=None):\n query = \"SELECT * FROM tangerine WHERE \"\n if id: query += \"id='\"+str(id)+\"'\"\n elif name: query += \"name='\"+name+\"' AND parent_job IS NULL\"\n else: return None\n \n cur = self.conn.cursor()\n cur.execute(query + \";\")\n self.conn.commit()\n task = cur.fetchone()\n \n if task:\n return Task(self.columns, task);\n else:\n return None", "def requireTask(self, name):\n t = self.getTask(name)\n if t is None:\n raise Exception(\"Task %s not found in service\" % name)\n return t", "def task_type(cls):\n raise NotImplementedError()", "def task_type(cls):\r\n raise NotImplementedError()", "def get_input_task(self, name='0'):\n port = self.get_input(name).other\n if port is None:\n return None\n return port.task", "def get_class(self, name):\n return self.host.get_class(name)", "def get_task(self, key: str) -> Task:\n raise NotImplementedError", "def get_task_by_name(self, task_name):\n task_table = Table('task', self.metadata, autoload=True)\n try:\n parent_task = self.session.query(task_table).filter(task_table.c.name==str(task_name)).one()\n task = parent_task._asdict()\n return task\n except Exception as e:\n logger.info(f\"Error retrieving task {task_name}: {e}\")\n return False", "def get_task(self, code: str) -> \"Task\": # noqa: F821\n if code not in self.tasks:\n raise PyDSTaskNoFoundException(\n \"Task with code %s can not found in process definition %\",\n (code, self.name),\n )\n return self.tasks[code]", "def get_task_from_app(app, name):\n # type: (celery.app.Celery, str) -> celery.app.task.Task\n try:\n task = app.tasks[name]\n\n except KeyError:\n # Some times current app loses its name???\n name = '__main__.' 
+ name.split('.', 1)[1]\n task = app.tasks[name]\n\n return task", "def _find_class(self, class_name: str) -> Type:\n return self.class_resolver.find_class(class_name)", "def get_class(self, name):\n raise NotImplementedError", "def get_class(self, name: str) -> Type:\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'new instance of {name}')\n name = self.default_name if name is None else name\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'creating instance of {name}')\n class_name, params = self._class_name_params(name)\n return self._find_class(class_name)", "def _get_task(self, task):\n try:\n return TASKS[task]\n except KeyError:\n raise ValueError(\"task %s \"\n \"is not supported. \" % task)", "def find_class(self, class_name: str) -> Type:\n pass", "def by_name(name, cls=None):\n\n if cls is None:\n cls = base.Point\n\n if cls.__name__ == name:\n return cls\n\n for c in cls.__subclasses__():\n cc = by_name(name, c)\n if cc is not None:\n return cc\n\n return None", "def _get_task_cls(fn):\n\n if hasattr(fn, \"task_cls\"):\n cls = fn.task_cls\n elif hasattr(fn, \"decorator\") and hasattr(fn.decorator, \"task_cls\"):\n cls = fn.decorator.task_cls\n else:\n cls = asynq.AsyncTask\n\n if cls is None: # @async_proxy()\n return asynq.FutureBase\n else:\n return cls", "def get_task(self, id):\n raise NotImplementedError()", "def get_task_by_mapper(self, mapper, dontcreate=False):\n try:\n return self.tasks[mapper]\n except KeyError:\n if dontcreate:\n return None\n task = UOWTask(self, mapper)\n task.mapper.register_dependencies(self)\n return task", "def get_task(self, id):\n\n collection = self._get_collection()\n\n item = collection.find_one({\"_id\": ObjectId(id)})\n\n if item:\n return _mongo_item_to_task(item)\n else:\n return None", "def task(self, name):\n with self.db_lock:\n return self.rcon.hget(self.task_key, name)" ]
[ "0.7046765", "0.7023009", "0.6943184", "0.6686502", "0.66718686", "0.66415167", "0.6624116", "0.65350837", "0.6532999", "0.6526461", "0.64754677", "0.63859135", "0.6356256", "0.6324332", "0.62372583", "0.62365824", "0.61092407", "0.6106103", "0.61002696", "0.6061775", "0.6025917", "0.59947354", "0.5991187", "0.59675837", "0.5955949", "0.5947895", "0.59421796", "0.5936863", "0.5908349", "0.58951217" ]
0.8375071
0
Request a graceful shutdown. Does not block.
def shutdown(self): self.logger.info("Received graceful shutdown request") self.stop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def shutdown_gracefully(self) -> None:", "async def shutdown_gracefully(self) -> None:", "def request_shutdown(self, restart=False):", "def request_shutdown(self, kernel_id, restart=False):", "def shutdown(self):\n self.req_shutdown = True", "def shutdown(self):\n self.shutdown_requested = True", "def initiate_shutdown(self) -> None:", "def shutdown(self):\n try:\n self._request(\"POST /shutdown\")\n time.sleep(0.300)\n except requests.exceptions.ConnectionError:\n pass\n if self._process and self._process.poll() is None:\n self._process.kill()\n if self._session:\n self._session.close()", "def shutdown(self):\n\n raise NotImplementedError", "def _shutdown(self):", "async def shutdown(self) -> int:", "def shutdown(self) -> None:", "def shutdown(self) -> None:", "async def shutdown(self):", "def shutdown():\n os.kill(os.getpid(), signal.SIGTERM)", "def finish_shutdown(self, kernel_id, waittime=None, pollinterval=0.1, restart=False):", "def shutdown():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><shutdown><system></system></shutdown></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def shutdown_kernel(self, now=False, restart=False):", "async def shutdown_requested(self, restart=False) -> None:\n await self.shutdown_listener()", "def shutdown(self):\n self.action('shutdown')", "def shutdown():\n os.system(\"sudo shutdown now\")", "def shutdown(self):\n ...", "def shutdown(self):", "def shutdown(self):\n raise NotImplementedError", "def shutdown():\n shutdown_func = request.environ.get(\n 'werkzeug.server.shutdown') # default web server with flask\n if shutdown_func is None:\n return 'unable to shutdown server!', 501\n shutdown_func()\n return \"server shutting down...\"", "def shutdown() -> None: # TODO Better place for this code\n # TODO Safe landing\n pass", "def shutdown (self, sig=None):\n pass\n #TODO: implement more realistic closing semantics", "def __exit__(self, exc_type, exc_value, traceback): \n self.shutdown()", "def shutdown(self):\n pass", "def shutdown(self):\n pass" ]
[ "0.7409461", "0.7409461", "0.7391829", "0.71747464", "0.70665526", "0.6845416", "0.6801792", "0.6633282", "0.65604484", "0.6521576", "0.65183425", "0.6464831", "0.6464831", "0.64030904", "0.636419", "0.6333115", "0.6330935", "0.63081044", "0.63011456", "0.62946767", "0.62560433", "0.6254438", "0.62338674", "0.62130326", "0.61974394", "0.6184515", "0.6165877", "0.61575645", "0.61238414", "0.61238414" ]
0.7560451
0
Request a graceful restart. Does not block.
def restart(self): self.logger.info("Received graceful restart request") self._restart = True self.stop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def do_force_restart(self):\n if self.config[\"allow_restart_requests\"]:\n os._exit(42)\n else:\n return self._rpc_failure(\"Restart disallowed by configuration\")", "def attempt_restart(self):\n self.controller.publish(self, 'restart')", "def restart(self):\n self.client.post(self.path+'/action', { 'restart': {} })\n return True", "def node_restart(ctx):\n ctx.obj['node'].attempt_restart()", "def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()", "def restart():\n stop()\n start()", "def _graceful_restart(self, wait):\n\n self._sut.shutdown(True)\n self._sut.start()\n\n if wait:\n sleep(BespokeGlobals.VM_BOOT_WAIT)", "def restart(self):\n\t\treturn self.reset().start()", "def _restart(self):\n pass", "def net_service_restart(self):\n\t\treturn Job(SDK.PrlSrv_NetServiceRestart(self.handle)[0])", "def restart(self):\n self.stop()\n self.start(init=False)", "def restart(self, **kwargs):\n return self.client.api.restart(self.id, **kwargs)", "def schedule_system_restart():\n global _force_system_restart\n _force_system_restart = True", "def restart(self):\n\n self.stop()\n self.start()", "def Restart(self, request, global_params=None):\n config = self.GetMethodConfig('Restart')\n return self._RunMethod(\n config, request, global_params=global_params)", "def restart(self):\r\n self._safe_close()\r\n self._stopped.clear()\r\n self.reconnect()", "def restart(self):\n self.stop()\n self.start()", "def restart(self):\n self.stop()\n self.start()", "def restart(self) -> None:", "def restart():\n info = request.get_json() or {}\n delay_secs = int(info.get('delay', 0))\n\n t = threading.Timer(delay_secs, update_trigger_file)\n t.start()\n\n return jsonify('Success')", "def restart(config):\n shutdown(config)\n startup(config)\n return", "def restart(self):\n self._start_time = None\n self.start()", "def Restart(self):\n handler = self.get_command_object(\"Restart\")\n handler()", "def request_shutdown(self, restart=False):", "def restart(self):\n global shouldRestart\n shouldRestart = True\n logging.info(\"Restarting bot\")\n self.die()", "def restart(self):\n pass", "def restart(verbose=False, force=False):\n\n _prepare_execution(verbose)\n _validate_components_prepared('restart')\n _validate_force(force, 'restart')\n\n stop(verbose, force)\n start(verbose)\n _print_time()", "def restart(self):", "def restart(self):\r\n pass", "def do_restart(self, timeSinceLastInterruption, noRestart=False):\n delta = self.ckptTime\n try:\n self.ProcLog(\"Attempting to restart from ckpt #%d, taken at %d\" % (self.numCkpts, self.lastCheckpointTime))\n self.lostWork += timeSinceLastInterruption\n if not noRestart:\n assert self.broken == True\n yield self.env.timeout(delta)\n # Done with restart without errors\n self.ProcLog(\"Restart successful... going back to compute\")\n except simpy.Interrupt as e:\n if (e.cause == \"failure\"):\n # TODO: Handle failures during a restart\n print(\"Failure in the middle of a restart... will attempt restart again\")\n exit(-1)\n #self.do_restart()" ]
[ "0.7772912", "0.7263124", "0.7245009", "0.71927315", "0.719068", "0.7186406", "0.7109044", "0.7063854", "0.6949366", "0.69067913", "0.6904618", "0.6888579", "0.6879974", "0.68628997", "0.6830718", "0.682366", "0.6802731", "0.6802731", "0.6781907", "0.67575556", "0.6754284", "0.67229503", "0.6705063", "0.66707927", "0.6657327", "0.66459394", "0.6624079", "0.66206986", "0.6618805", "0.66069716" ]
0.8317874
0
Compute the clipped count of a gram
def count_clip(gram, grams, reference): clip = 0 n = len(gram.split(' ')) count_wi = 0 for g in grams: if gram == g: count_wi += 1 # print('count_wi:', count_wi) for ref in reference: ref_list = ref.split(' ') count_ref = 0 for i in range(len(ref_list) - n + 1): if gram == ' '.join(ref_list[i:i+n]): count_ref += 1 # print('count_ref: ', count_ref) count = min(count_wi, count_ref) if count > clip: clip = count return clip
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count():", "def cal_pn(grams_set, grams, candidate, reference):\n count = 0\n for gram in grams_set:\n # print(gram)\n count += count_clip(gram, grams, reference)\n # calculate log() for p, so '+10**-8' avoid 'p==0'\n p = count / len(grams) + 10**-8 \n return p", "def clippingcounter(clipping_list, input_dir):\n\t\texcludelist=[]\n\t\t\n\t\t#dicts to store results\n\t\tdicti=defaultdict(float)\n\t\tmatchesdicti=defaultdict(list)\n\t\tresults=[]\n\t\t\n\t\tclipping_list=[re.compile(\"[^web|i]\\W(\"+i+\")\\W\") if i in [\"cams?\", \"sites?\"] else re.compile(\"\\W(\"+i+\")\\W\") for i in clipping_list]\n\t\t#clipping_list=[re.compile(\"\\W(\"+i+\")\\W\") for i in clipping_list]\n\t\tclipping_list=set(clipping_list)\n\t\tprint [i.pattern for i in clipping_list]\n\t\t#iterate and match\n\t\tfor dir in [i for i in os.listdir(input_dir) if not i.startswith(\".\")]:\n\t\t\tprint dir\n\t\t\tfor fili in [i for i in os.listdir(os.path.join(input_dir, dir)) if not i.startswith(\".\")]:\n\t\t\t\twith codecs.open(os.path.join(input_dir, dir, fili), \"r\", \"utf-8\") as inputtext:\n\t\t\t\t\tinputad=ct.adtextextractor(inputtext.read(), fili).lower()\n\t\t\t\t#result is a list of lists which contain matches for each regex/acronym\n\t\t\t\tresult=[([m for m in i.findall(inputad) if not m in excludelist], i.pattern) for i in clipping_list] \n\t\t\t\t# o=[(r,os.path.join(input_dir, dir, fili)) for r in result if len(r[0]) > 2]\n# \t\t\t\tif o:\n# \t\t\t\t\tprint o\n\t\t\t\tresults.append([len(matches) for matches, pattern in result])\n\t\t\t\tfor matches, pattern in result:\n \t\t\t\t\t#the dicti is {pattern:count, pattern: count, ...}\n \t\t\t\t\tdicti[pattern]=dicti[pattern]+len(matches)\n \t\t\t\t\tmatchesdicti[pattern]=matchesdicti[pattern]+matches\n\t\tprint \"\\n\".join([\":\".join((i, str(dicti[i]), \"|\".join(set(matchesdicti[i])))) for i in sorted(dicti, key=dicti.get, reverse=True)])\t\n\t\tfor entry in {k:v for k,v in matchesdicti.items() if v > 10}:\n\t\t\tprint entry\n\t\t\ttk.tokenfinder([re.sub(\"[\\(\\)]\", \"\", entry)], \"/Users/ps22344/Downloads/craig_0208\")\n\t\treturn results", "def count_gold(pyramid):\n\n #replace this for solution\n return 0", "def embedcount(line):\r\n\r\n x_temp = line.count(BOX_CHAR['lu'])\r\n return self.defaults.get('size')-(4*x_temp)", "def count(self, word):\n pass", "def __init__(self, n, sents):\n assert n > 0\n self._n = n\n print(\"Counting...\")\n count = defaultdict(int)\n while n >= 0:\n for sent in sents:\n s = sent[:] # En una oracion auxiliar agrego el item de start y end para contarlos\n s.insert(0, \"<s>\")\n s.append(\"</s>\")\n for i in range(len(s) - n + 1):\n count[tuple(s[i:i + n])] += 1\n n -= 1\n count[()] = count[()] - count[('<s>',)] - count[\n ('</s>',)] # Pero no quiero que <s> y </s> sean considerados por ()\n self._count = count\n print(\"Computing vocabulary...\")\n self._voc = voc = set()\n for sent in sents:\n voc = voc.union(set(sent))\n voc.add('</s>')\n self._voc = voc\n self._V = len(voc) # vocabulary size\n print(\"Done\")", "def get_marble_count(self):", "def pix_analysis(length, legs, count):\n mats_wins = 0\n pats_wins = 0\n ties = 0\n for _ in range(count):\n end_of_game = pix(length, legs)\n if end_of_game == 1:\n mats_wins += 1\n elif end_of_game == -1:\n pats_wins += 1\n else:\n ties += 1\n print('Mats wins:', mats_wins, '\\nPats wins:', pats_wins, '\\nTies:', ties)", "def count_words_and_dublicates(novel):", "def count(self, sub) -> int:\n pass", "def gc(sequence):\n sequence = sequence.upper()\n return 
(sequence.count('G') + sequence.count('C')) / float(len(sequence))", "def pix(length, legs):\n pos_m = 0\n pos_p = 0\n for i in range(1, legs+1):\n print(i, '. leg')\n pos_m, pos_p = one_leg(length, pos_m, pos_p)\n if pos_m == length:\n print('Mat wins')\n return 1\n elif pos_p == length:\n print('Pat wins')\n return -1\n return 0", "def __init__(self, n, sents, gamma=None, addone=True):\n assert n > 0\n self._n = n\n\n if gamma is not None:\n # everything is training data\n train_sents = sents\n else:\n # 90% training, 10% held-out\n m = int(0.45 * len(sents))\n l = int(0.65 * len(sents))\n train_sents = sents[:m] + sents[l:]\n held_out_sents = sents[m:l]\n\n print('Computing counts...')\n count = defaultdict(int)\n while (n >= 0):\n for sent in train_sents:\n s = sent[:] ## En una oracion auxiliar agrego el item de start y end para contarlos\n s.insert(0, \"<s>\")\n s.append(\"</s>\")\n for i in range(len(s) - n + 1):\n count[tuple(s[i:i + n])] += 1\n n -= 1\n count[()] = count[()] - count[('<s>',)] - count[\n ('</s>',)] # Pero no quiero que <s> y </s> sean considerados por ()\n self._count = count\n # WORKed HERE!!\n # COMPUTE COUNTS FOR ALL K-GRAMS WITH K <= N\n\n # compute vocabulary size for add-one in the last step\n self._addone = addone\n if addone:\n print('Computing vocabulary...')\n self._voc = voc = set()\n for sent in sents:\n voc = voc.union(set(sent))\n voc.add('</s>')\n self._voc = voc\n self._V = len(voc)\n\n # compute gamma if not given\n if gamma is not None:\n self._gamma = gamma\n else:\n print('Computing gamma...')\n self._gamma = gamma = 1\n p = self.log_prob(held_out_sents)\n new_gamma = 2\n streak = 1\n growing = True\n turns = 0\n while (turns < 15):\n self._gamma = new_gamma\n np = self.log_prob(held_out_sents)\n gamma = new_gamma\n if (np > p):\n if growing:\n streak += 1\n else:\n turns += 1\n streak = 0\n growing = True\n new_gamma = new_gamma + 2 ** streak\n else:\n if growing:\n turns += 1\n streak = 0\n growing = False\n else:\n streak += 1\n new_gamma = new_gamma - 2 ** streak\n p = np\n self._gamma = new_gamma\n print(self._gamma)", "def count_sheeps(sheep):\n return sheep.count(True)", "def _getCountForUnigram(self,word1):\n count=self.unigrams[(word1)]\n if count==0:\n count=0.001\n return count", "def count_subs(x,y):\n\t# Encases diagonals in square grid of size 'square'\n\tsquare = x + y - 2\n\tsubs = 0\n\t# For every point counts the number of rectagles with (a,b) as upper left corner\n\tfor a in range(square):\n\t\tfor b in range(square):\n\t\t\tif valid(a,b,x,y):\n\t\t\t\tthis_subs = subs_at_point(a,b,x,y)\n\t\t\t\tprint \"%3d \" %(this_subs),\n\t\t\tprint \"\"\n\treturn subs", "def _profile(self, text):\n prof = zeros(len(self.alph)**self.N)\n ngs = ngrams(text, self.N)\n for tup in ngs:\n loc = 0\n for i in range(len(tup)):\n loc += (len(self.alph)**i) * self.alph.index(tup[i])\n prof[loc] += 1\n return prof", "def catsPerWord(self, thresh):\n totalCats = 0\n words = 0\n for word, wordFreqs in self.lex.items():\n dictSize = len([f for f in wordFreqs.values() if f >= thresh])\n totalCats += dictSize\n if sum(wordFreqs.values()) >= 20:\n words += 1\n return float(totalCats)/float(words)", "def countComponents26(cube):\n n,l = labelComponents26(cube);\n return n;", "def length_score( canvas ):\n score = 0\n for seqpos, nt in canvas.nucleotides.iteritems():\n if seqpos + 1 not in canvas.nucleotides.keys(): continue\n\n d = distance( nt, canvas.nucleotides[seqpos+1] )\n #print \"Distance between %d and %d is %f\" % (seqpos, seqpos+1, d)\n #score += 
harmonic_penalty( d, NT_DISTANCE, spring_constant )\n score += flat_harmonic_penalty( d, NT_MIN_DISTANCE, NT_MAX_DISTANCE, spring_constant )\n\n return score", "def count_pegs(self):\r\n count = 0\r\n\r\n for i in range(0, len(self.matrix)):\r\n for j in range(0, len(self.matrix[i])):\r\n if self.matrix[i][j] == \"1\":\r\n count += 1\r\n\r\n return count", "def forcast(self, doc):\n num_words = _get_num_words(doc)\n\n if num_words < 150:\n return 0\n\n mono_syllabic = 0\n for i in range(150):\n if syllapy.count(doc[i].text) == 1:\n mono_syllabic += 1\n return 20 - (mono_syllabic / 10)", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def get_correct_lap_count(self):", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def coverage(text: str) -> float:\n words = set(text.split(' '))\n return len([w for w in words if frequency(w) != 0]) / len(words) * 100" ]
[ "0.6071192", "0.59242237", "0.5802611", "0.57847875", "0.5759973", "0.5750524", "0.572034", "0.5685732", "0.56412256", "0.5637095", "0.5602329", "0.5563017", "0.55156827", "0.54847884", "0.5453589", "0.5435633", "0.5429448", "0.54287434", "0.5422308", "0.54129076", "0.5397255", "0.53913367", "0.53719836", "0.5360537", "0.5360537", "0.5360537", "0.5360537", "0.53577274", "0.53416777", "0.53331536" ]
0.76374227
0
tests that passing in a kwarg to the update method that isn't a column will fail
def test_invalid_update_kwarg(self): with self.assertRaises(ValidationError): TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(bacon=5000)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_invalid_update_kwarg(self):\r\n with self.assertRaises(ValidationError):\r\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(bacon=5000)", "def test_invalid_update_kwarg(self):\n m0 = TestUpdateModel.create(count=5, text='monkey')\n with self.assertRaises(ValidationError):\n m0.update(numbers=20)", "def test_invalid_update_kwarg(self):\r\n m0 = TestUpdateModel.create(count=5, text='monkey')\r\n with self.assertRaises(ValidationError):\r\n m0.update(numbers=20)", "def test_update_on_unique_field_raises(test_store):\n\n with pytest.raises(NotImplementedError):\n test_store.update(fields={\"name\": \"Andy\"})", "def test_do_cell_update_ignores_unknown_fields(self, mock_update):\n client = mock.Mock()\n inventory = mock.Mock()\n inventory.cells = cells.CellManager(mock.ANY,\n mock.ANY,\n 'http://127.0.0.1/')\n client.inventory = mock.Mock(name='inventory')\n client.inventory.return_value = inventory\n invalid_input = Namespace(region=1,\n id=1,\n name='mock_cell',\n invalid=True)\n cells_shell.do_cell_update(client, invalid_input)\n vars(invalid_input).pop('region')\n vars(invalid_input).pop('invalid')\n mock_update.assert_called_once_with(**vars(invalid_input))", "def update(self,attribute,values,**kwargs):\n\n # the asked keys\n #keys = kwargs.keys()\n\n # check if the column exists\n try:\n self.c.execute(\"SELECT EXISTS(SELECT {an} FROM ATOM)\".format(an=attribute))\n except:\n print('Error column %s not found in the database' %attribute)\n self.get_colnames()\n raise ValueError('Attribute name not recognized')\n\n #if len(kwargs) == 0:\n # raise ValueError('Update without kwargs seem to be buggy. Use rowID=list(range(natom)) instead')\n\n # handle the multi model cases\n # this is still in devs and not necessary\n # for deeprank.\n # We will have to deal with that if we\n # release pdb2sql as a standalone\n # if 'model' not in keys and self.nModel > 0:\n # for iModel in range(self.nModel):\n # kwargs['model'] = iModel\n # self.update(attribute,values,**kwargs)\n # return\n\n # parse the attribute\n if ',' in attribute:\n attribute = attribute.split(',')\n\n if not isinstance(attribute,list):\n attribute = [attribute]\n\n\n # check the size\n natt = len(attribute)\n nrow = len(values)\n ncol = len(values[0])\n\n if natt != ncol:\n raise ValueError('Number of attribute incompatible with the number of columns in the data')\n\n\n # get the row ID of the selection\n rowID = self.get('rowID',**kwargs)\n nselect = len(rowID)\n\n if nselect != nrow:\n raise ValueError('Number of data values incompatible with the given conditions')\n\n # prepare the query\n query = 'UPDATE ATOM SET '\n query = query + ', '.join(map(lambda x: x+'=?',attribute))\n #if len(kwargs)>0: # why did I do that ...\n query = query + ' WHERE rowID=?'\n\n # prepare the data\n data = []\n for i,val in enumerate(values):\n\n tmp_data = [ v for v in val ]\n\n #if len(kwargs)>0: Same here why did I do that ?\n # here the conversion of the indexes is a bit annoying\n tmp_data += [rowID[i]+1]\n\n data.append(tmp_data)\n\n self.c.executemany(query,data)", "def update_values(self, *arg, **kwargs):\n keys = set(self.keys())\n if arg:\n for x in arg:\n xkeys = set(x.keys())\n if xkeys.issubset(keys):\n self.update(x)\n else:\n raise ValueError(\n \"'{}' contains columns not in this row currently\"\n .format(x)\n )\n if kwargs:\n kwkeys = set(kwargs.keys())\n if kwkeys.issubset(keys):\n self.update(kwargs)\n else:\n raise ValueError(\n \"'{}' contains columns not in this row currently\"\n .format(kwargs)\n 
)", "def test_no_column(self):\n\n self.assertRaises(ValueError, self.table.where, 'True')", "def test_deep_update_illegal_update(self):\n # Update with an illegal type\n for update_with in [42, None, [42], \"bar\"]:\n with self.assertRaisesRegex(\n SaltInvocationError,\n r\"Cannot update {} with a {}.\" \"\".format(type({}), type(update_with)),\n ):\n dictupdate.update_dict_key_value({}, \"foo\", update_with)\n # Again, but now using OrderedDicts\n for update_with in [42, None, [42], \"bar\"]:\n with self.assertRaisesRegex(\n SaltInvocationError,\n r\"Cannot update {} with a {}.\"\n \"\".format(type(OrderedDict()), type(update_with)),\n ):\n dictupdate.update_dict_key_value(\n {}, \"foo\", update_with, ordered_dict=True\n )", "def test_update_args_bad(self):\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n\n r.update(10)\n d[\"id\"] = 10\n self.assertEqual(r.__dict__, d)\n\n with self.assertRaises(ValueError) as e:\n r.update(10, -5)\n s = \"width must be > 0\"\n self.assertEqual(str(e.exception), s)\n\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, -17)\n s = \"height must be > 0\"\n self.assertEqual(str(e.exception), s)\n\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, -20)\n s = \"x must be >= 0\"\n self.assertEqual(str(e.exception), s)\n\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, 20, -25)\n s = \"y must be >= 0\"\n self.assertEqual(str(e.exception), s)", "def test_update_to_non_json():\n starting_db = create_db(STARTING_DB_INPUT)\n with pytest.raises(ValueError):\n o_obj.update_object_in_db(\n starting_db,\n \"some_uid\",\n \"this isn't json :(\"\n )", "def test_update_from_none(self):\r\n ctx = {}\r\n col = columns.Set(columns.Integer, db_field=\"TEST\")\r\n statements = col.get_update_statement({1, 2, 3, 4}, None, ctx)\r\n\r\n #only one variable /statement should be generated\r\n assert len(ctx) == 1\r\n assert len(statements) == 1\r\n\r\n assert ctx.values()[0].value == {1, 2, 3, 4}\r\n assert statements[0] == '\"TEST\" = :{}'.format(ctx.keys()[0])", "def test_update_attribute_method9(self):\n with self.assertRaises(TypeError):\n r1 = Rectangle(10, 10, 10, 10)\n r1.update(\"put\", \"new\")", "def test_using_nonexistant_column_names_in_query_args_raises_error(self):\r\n with self.assertRaises(AttributeError):\r\n TestModel.objects(TestModel.nonsense == 5)", "def test_update_from_none(self):\r\n ctx = {}\r\n col = columns.List(columns.Integer, db_field=\"TEST\")\r\n statements = col.get_update_statement([1, 2, 3], None, ctx)\r\n\r\n #only one variable /statement should be generated\r\n assert len(ctx) == 1\r\n assert len(statements) == 1\r\n\r\n assert ctx.values()[0].value == [1, 2, 3]\r\n assert statements[0] == '\"TEST\" = :{}'.format(ctx.keys()[0])", "def test_update_update_has_a_value(self):\n self.Person.drop_collection()\n\n author = self.Person.objects.create(name=\"Test User\")\n\n with pytest.raises(OperationError):\n self.Person.objects(pk=author.pk).update({})\n\n with pytest.raises(OperationError):\n self.Person.objects(pk=author.pk).update_one({})", "def test_insert_or_update_query(self):\n\n row = (\n 'source',\n 'signal',\n 'time_type',\n 'geo_type',\n 'time_value',\n 'geo_value',\n 'value',\n 'stderr',\n 'sample_size',\n )\n mock_connector = MagicMock()\n database = Database()\n database.connect(connector_impl=mock_connector)\n\n database.insert_or_update(*row)\n\n connection = mock_connector.connect()\n cursor = connection.cursor()\n self.assertTrue(cursor.execute.called)\n\n sql, args = 
cursor.execute.call_args[0]\n self.assertEqual(args, row)\n\n sql = sql.lower()\n self.assertIn('insert into', sql)\n self.assertIn('`covidcast`', sql)\n self.assertIn('unix_timestamp', sql)\n self.assertIn('on duplicate key update', sql)", "def test_unsupported_object(self):\n\n self.assertRaises(\n (TypeError, ValueError), self.table.where, '[]'\n )\n self.assertRaises(TypeError, self.table.where, 'obj', {'obj': {}})\n self.assertRaises(\n (TypeError, ValueError), self.table.where, 'c_bool < []'\n )", "def put(self,colname,value,**kwargs):\n arguments = {'where' : \"String e.g 'chainID = 'A''\",\n 'index' : \"Array e.g. [27,28,30]\",\n 'name' : \"'CA' atome name\",\n 'query' : \"SQL query e.g. 'WHERE chainID='B' AND resName='ASP' \"}\n\n # the asked keys\n keys = kwargs.keys()\n\n # if we have more than one key we kill it\n if len(keys)>1 :\n print('You can only specify 1 conditional statement for the pdb2sql.put function')\n return\n\n # check if the column exists\n try:\n self.c.execute(\"SELECT EXISTS(SELECT {an} FROM ATOM)\".format(an=colname))\n except:\n print('Error column %s not found in the database' %colname)\n self.get_colnames()\n return\n\n\n # if we have 0 key we take the entire db\n if len(kwargs) == 0:\n query = 'UPDATE ATOM SET {cn}=?'.format(cn=colname)\n value = tuple([value])\n self.c.execute(query,value)\n return\n\n # otherwise we have only one key\n key = list(keys)[0]\n cond = kwargs[key]\n\n # select which key we have\n if key == 'where':\n query = 'UPDATE ATOM SET {cn}=? WHERE {cond}'.format(cn=colname,cond=cond)\n value = tuple([value])\n self.c.execute(query,value)\n\n elif key == 'name' :\n values = tuple([value,cond])\n query = 'UPDATE ATOM SET {cn}=? WHERE name=?'.format(cn=colname)\n self.c.execute(query,values)\n\n elif key == 'index' :\n values = tuple([value] + [v+1 for v in cond])\n qm = ','.join(['?' for i in range(len(cond))])\n query = 'UPDATE ATOM SET {cn}=? WHERE rowID in ({qm})'.format(cn=colname,qm=qm)\n self.c.execute(query,values)\n\n elif key == 'query' :\n query = 'UPDATE ATOM SET {cn}=? 
{c1}'.format(cn=colname,c1=cond)\n value = tuple([value])\n self.c.execute(query,value)\n\n else:\n print('Error arguments %s not supported in pdb2sql.get()\\nOptions are:\\n' %(key))\n for posskey,possvalue in arguments.items():\n print('\\t' + posskey + '\\t\\t' + possvalue)\n return", "async def test_update_not_implemented(self):\n with self.assertRaises(NotImplementedError):\n await self.collection.update('x', {})", "def test_unknown_names_raise_exception(self):\r\n tm = TestModel.create(count=8, text='123456789')\r\n with self.assertRaises(TypeError):\r\n tm.update(jon='beard')", "def test_mixed_value_and_null_update(self):\n partition = uuid4()\n for i in range(5):\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\n\n # sanity check\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, str(i))\n\n # perform update\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6, text=None)\n\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, 6 if i == 3 else i)\n self.assertEqual(row.text, None if i == 3 else str(i))", "def test_update_record(self):\n pass", "def test_update_attribute_method8(self):\n with self.assertRaises(ValueError):\n r1 = Rectangle(10, 10, 10, 10)\n r1.update(2, -3)", "def test_updating_record_with_kwargs(self, test_domain):\n identifier = uuid4()\n person = test_domain.repository_for(Person)._dao.create(\n id=identifier, first_name=\"Johnny\", last_name=\"John\", age=2\n )\n\n test_domain.repository_for(Person)._dao.update(person, age=10)\n u_person = test_domain.repository_for(Person)._dao.get(identifier)\n assert u_person is not None\n assert u_person.age == 10", "def test_primary_key_update_failure(self):\r\n with self.assertRaises(ValidationError):\r\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(cluster=5000)", "def test_update_direction_query(self):\n\n args = (\n 'source',\n 'signal',\n 'time_type',\n 'geo_type',\n 'time_value',\n 'geo_value',\n 'direction',\n )\n mock_connector = MagicMock()\n database = Database()\n database.connect(connector_impl=mock_connector)\n\n database.update_direction(*args)\n\n connection = mock_connector.connect()\n cursor = connection.cursor()\n self.assertTrue(cursor.execute.called)\n\n sql, args = cursor.execute.call_args[0]\n expected_args = (\n 'direction',\n 'source',\n 'signal',\n 'time_type',\n 'geo_type',\n 'time_value',\n 'geo_value',\n )\n self.assertEqual(args, expected_args)\n\n sql = sql.lower()\n self.assertIn('update', sql)\n self.assertIn('`covidcast`', sql)\n self.assertIn('`timestamp2` = unix_timestamp', sql)\n self.assertIn('`direction` = %s', sql)", "def test_no_uid_causes_error():\n empty = create_db()\n with pytest.raises(ValueError):\n o_obj.update_object_in_db(\n empty,\n \"some_uid\",\n INP\n )", "def testColumnsNotListWrapped(self):\n with self.assertRaises(TypeError):\n Table(self.base_case[\"name\"], Column(\"col1\", str))", "def test_primary_key_update_failure(self):\n with self.assertRaises(ValidationError):\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(cluster=5000)" ]
[ "0.7227087", "0.7104254", "0.7074279", "0.6329433", "0.62189066", "0.61630607", "0.6159414", "0.6145743", "0.6094437", "0.60466427", "0.59687847", "0.59339607", "0.5895221", "0.58757293", "0.5865568", "0.5816516", "0.57398236", "0.5725852", "0.5719", "0.5708378", "0.57044303", "0.56730783", "0.5672898", "0.5670714", "0.56346303", "0.5624603", "0.56236744", "0.5618991", "0.56105185", "0.5606561" ]
0.72344637
0
setting a field to null in the update should issue a delete statement
def test_null_update_deletes_column(self): partition = uuid4() for i in range(5): TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i)) # sanity check for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)): self.assertEqual(row.cluster, i) self.assertEqual(row.count, i) self.assertEqual(row.text, str(i)) # perform update TestQueryUpdateModel.objects(partition=partition, cluster=3).update(text=None) for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)): self.assertEqual(row.cluster, i) self.assertEqual(row.count, i) self.assertEqual(row.text, None if i == 3 else str(i))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_null_update_deletes_column(self):\r\n partition = uuid4()\r\n for i in range(5):\r\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\r\n\r\n # sanity check\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == i\r\n assert row.text == str(i)\r\n\r\n # perform update\r\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(text=None)\r\n\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == i\r\n assert row.text == (None if i == 3 else str(i))", "def test_update_field_to_null(self, field, field_name):\n control = factories.ControlFactory()\n\n response = self.api.put(control, control.id, {field: None})\n\n self.assert400(response)\n self.assertEqual(response.json[\"message\"],\n field_name + \" for the object is not specified\")\n control = db.session.query(all_models.Control).get(control.id)\n self.assertIsNotNone(control.external_id)", "def test_update_risk_field_to_null(self, field, field_name):\n risk = factories.RiskFactory()\n\n response = self.api.put(risk, risk.id, {\n field: None,\n })\n\n self.assert400(response)\n self.assertEqual(response.json[\"message\"],\n field_name + \" for the object is not specified\")\n risk = db.session.query(all_models.Risk).get(risk.id)\n self.assertIsNotNone(risk.external_id)", "def revive(self):\n field_name = self.get_delete_flag_field_name()\n return self.update(**{field_name: None})", "def test_null_update(self):\r\n c = SetUpdateClause('s', None, previous={1, 2})\r\n c._analyze()\r\n c.set_context_id(0)\r\n\r\n self.assertIsNone(c._assignments)\r\n self.assertIsNone(c._additions)\r\n self.assertIsNone(c._removals)\r\n\r\n self.assertEqual(c.get_context_size(), 0)\r\n self.assertEqual(str(c), '')\r\n\r\n ctx = {}\r\n c.update_context(ctx)\r\n self.assertEqual(ctx, {})", "def NULL(self, t):\n t.value = None\n return t", "def _delete_null_columns(self):\r\n ds = DeleteStatement(self.column_family_name)\r\n deleted_fields = False\r\n for _, v in self.instance._values.items():\r\n col = v.column\r\n if v.deleted:\r\n ds.add_field(col.db_field_name)\r\n deleted_fields = True\r\n elif isinstance(col, Map):\r\n uc = MapDeleteClause(col.db_field_name, v.value, v.previous_value)\r\n if uc.get_context_size() > 0:\r\n ds.add_field(uc)\r\n deleted_fields = True\r\n\r\n if deleted_fields:\r\n for name, col in self.model._primary_keys.items():\r\n ds.add_where_clause(WhereClause(\r\n col.db_field_name,\r\n EqualsOperator(),\r\n col.to_database(getattr(self.instance, name))\r\n ))\r\n self._execute(ds)", "def __location_del(self):\n self.db_location = None\n self.save(update_fields=[\"db_location\"])", "def test_reverse_delete_rule_nullify(self):\n\n class Category(Document):\n name = StringField()\n\n class BlogPost(Document):\n content = StringField()\n category = ReferenceField(Category, reverse_delete_rule=NULLIFY)\n\n BlogPost.drop_collection()\n Category.drop_collection()\n\n lameness = Category(name=\"Lameness\")\n lameness.save()\n\n post = BlogPost(content=\"Watching TV\", category=lameness)\n post.save()\n\n assert BlogPost.objects.count() == 1\n assert BlogPost.objects.first().category.name == \"Lameness\"\n Category.objects.delete()\n assert BlogPost.objects.count() == 1\n assert BlogPost.objects.first().category is None", "def test_map_update_none_deletes_key(self):\r\n # partition = uuid4()\r\n # cluster = 1\r\n # 
TestQueryUpdateModel.objects.create(\r\n # partition=partition, cluster=cluster,\r\n # text_map={\"foo\": '1', \"bar\": '2'})\r\n # TestQueryUpdateModel.objects(\r\n # partition=partition, cluster=cluster).update(\r\n # text_map__update={\"bar\": None})\r\n # obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\r\n # self.assertEqual(obj.text_map, {\"foo\": '1'})\r", "def test_delete_edge_case_with_write_concern_0_return_None(self):\n p1 = self.Person(name=\"User Z\", age=20).save()\n del_result = p1.delete(w=0)\n assert del_result is None", "def clearField(self):\n self.field.setValue(self.default_val)", "def clearField(self):\n self.field.setValue(self.default_val)", "def test_mixed_value_and_null_update(self):\r\n partition = uuid4()\r\n for i in range(5):\r\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\r\n\r\n # sanity check\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == i\r\n assert row.text == str(i)\r\n\r\n # perform update\r\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6, text=None)\r\n\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == (6 if i == 3 else i)\r\n assert row.text == (None if i == 3 else str(i))", "def delete(self):\n ret = False\n q = self.query\n pk = self.pk\n if pk:\n pk_name = self.schema.pk.name\n self.query.is_field(pk_name, pk).delete()\n setattr(self, pk_name, None)\n\n # mark all the fields that still exist as modified\n self.reset_modified()\n for field_name in self.schema.fields:\n if getattr(self, field_name, None) != None:\n self.modified_fields.add(field_name)\n\n ret = True\n\n return ret", "def __delete__(self, instance):\r\n if instance:\r\n if self.column.can_delete:\r\n instance._values[self.column.column_name].delval()\r\n else:\r\n raise AttributeError('cannot delete {} columns'.format(self.column.column_name))", "def test_mixed_value_and_null_update(self):\n partition = uuid4()\n for i in range(5):\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\n\n # sanity check\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, str(i))\n\n # perform update\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6, text=None)\n\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, 6 if i == 3 else i)\n self.assertEqual(row.text, None if i == 3 else str(i))", "def delete(self):\n self.read = False\n self.write = False\n self.save()", "def test_name_not_null(self):\n buffer = copy(self.entity1)\n buffer.name = None\n with self.assertRaises(ValidationError):\n buffer.save()\n\n transaction.rollback()", "def test_map_update_none_deletes_key(self):\n partition = uuid4()\n cluster = 1\n TestQueryUpdateModel.objects.create(\n partition=partition, cluster=cluster,\n text_map={\"foo\": '1', \"bar\": '2'})\n TestQueryUpdateModel.objects(\n partition=partition, cluster=cluster).update(\n text_map__update={\"bar\": None})\n obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\n self.assertEqual(obj.text_map, {\"foo\": '1'})", "def delval(self):\r\n self.value = None", "def delete_field(self):\n self.exec_command(b\"DeleteField\")", 
"def unset(self) -> None:\n self.val = None\n self.notes = []", "def emptyGroupementPk(heb):\n heb.heb_groupement_pk = None\n heb.update()", "def nullify(self):\n self._original_values.clear()\n self._modified_values.clear()\n self._extra_record_data.clear()\n self._references.clear()\n for mtm in self._mtm_referencelist:\n self._mtm_referencelist[mtm].parentobjectid = None\n for chl in self._child_referencelist:\n self._child_referencelist[chl].clear() \n self._ismodified = False\n self._hasdata = False\n self._astxt = \"(null)\"\n if self._table: \n for f in self._table:\n self._original_values[f.name] = None", "def _set_None(self):\n\n self.description = None\n self.func = None", "def delete(self):\n self.data = None", "def test_entities__Entity__removeField__2(entity_with_field, field):\n entity_with_field.removeField(field)\n assert field.interface is None", "def clearField(self):\n self.field.setDate(datetime.now().date())", "def delete_field(model, *arg):\n return model._pw_index_.delete_field(*arg)" ]
[ "0.6961704", "0.67482066", "0.65010685", "0.61259604", "0.5992511", "0.5938201", "0.5856239", "0.5753922", "0.57415175", "0.5727646", "0.5726502", "0.57241607", "0.57241607", "0.5721469", "0.57177246", "0.57076424", "0.57003665", "0.56877303", "0.56858563", "0.56692576", "0.5659547", "0.56409097", "0.5609798", "0.5609591", "0.55993867", "0.5589416", "0.5566584", "0.555712", "0.5541982", "0.55038404" ]
0.7050896
0
The CQL behavior is if you set a key in a map to null it deletes that key from the map. Test that this works with __update.
def test_map_update_none_deletes_key(self): partition = uuid4() cluster = 1 TestQueryUpdateModel.objects.create( partition=partition, cluster=cluster, text_map={"foo": '1', "bar": '2'}) TestQueryUpdateModel.objects( partition=partition, cluster=cluster).update( text_map__update={"bar": None}) obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) self.assertEqual(obj.text_map, {"foo": '1'})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_map_update_none_deletes_key(self):\r\n # partition = uuid4()\r\n # cluster = 1\r\n # TestQueryUpdateModel.objects.create(\r\n # partition=partition, cluster=cluster,\r\n # text_map={\"foo\": '1', \"bar\": '2'})\r\n # TestQueryUpdateModel.objects(\r\n # partition=partition, cluster=cluster).update(\r\n # text_map__update={\"bar\": None})\r\n # obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\r\n # self.assertEqual(obj.text_map, {\"foo\": '1'})\r", "def test_map_update_remove(self):\n partition = uuid4()\n cluster = 1\n TestQueryUpdateModel.objects.create(\n partition=partition,\n cluster=cluster,\n text_map={\"foo\": '1', \"bar\": '2'}\n )\n TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update(\n text_map__remove={\"bar\"},\n text_map__update={\"foz\": '4', \"foo\": '2'}\n )\n obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\n self.assertEqual(obj.text_map, {\"foo\": '2', \"foz\": '4'})\n\n TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update(\n text_map__remove={\"foo\", \"foz\"}\n )\n self.assertEqual(\n TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster).text_map,\n {}\n )", "def test_updates_to_none(self):\r\n m = TestMapModel.create(int_map={1: uuid4()})\r\n m.int_map = None\r\n m.save()\r\n\r\n m2 = TestMapModel.get(partition=m.partition)\r\n assert m2.int_map == {}", "def test_null_update(self):\r\n c = SetUpdateClause('s', None, previous={1, 2})\r\n c._analyze()\r\n c.set_context_id(0)\r\n\r\n self.assertIsNone(c._assignments)\r\n self.assertIsNone(c._additions)\r\n self.assertIsNone(c._removals)\r\n\r\n self.assertEqual(c.get_context_size(), 0)\r\n self.assertEqual(str(c), '')\r\n\r\n ctx = {}\r\n c.update_context(ctx)\r\n self.assertEqual(ctx, {})", "def test_updates_from_none(self):\r\n m = TestMapModel.create(int_map=None)\r\n expected = {1: uuid4()}\r\n m.int_map = expected\r\n m.save()\r\n\r\n m2 = TestMapModel.get(partition=m.partition)\r\n assert m2.int_map == expected\r\n\r\n m2.int_map = None\r\n m2.save()\r\n m3 = TestMapModel.get(partition=m.partition)\r\n assert m3.int_map != expected", "def test_pos_operate_write_set_to_aerospike_null(self):\n key = (\"test\", \"demo\", \"null_record\")\n\n bins = {\"name\": \"John\", \"no\": 3}\n\n assert 0 == self.as_connection.put(key, bins)\n\n (key, _, bins) = self.as_connection.get(key)\n\n assert {\"name\": \"John\", \"no\": 3} == bins\n\n llist = [\n {\"op\": aerospike.OPERATOR_WRITE, \"bin\": \"no\", \"val\": aerospike.null()},\n {\"op\": aerospike.OPERATOR_READ, \"bin\": \"no\"},\n ]\n\n (key, _, bins) = self.as_connection.operate(key, llist)\n assert {} == bins\n\n self.as_connection.remove(key)", "def test_pos_operate_prepend_set_to_aerospike_null(self):\n key = (\"test\", \"demo\", \"null_record\")\n\n bins = {\"name\": \"John\", \"no\": 3}\n\n assert 0 == self.as_connection.put(key, bins)\n\n (key, _, bins) = self.as_connection.get(key)\n\n assert {\"name\": \"John\", \"no\": 3} == bins\n\n llist = [\n {\"op\": aerospike.OPERATOR_PREPEND, \"bin\": \"no\", \"val\": aerospike.null()},\n {\"op\": aerospike.OPERATOR_READ, \"bin\": \"no\"},\n ]\n\n try:\n (key, _, bins) = self.as_connection.operate(key, llist)\n\n except e.InvalidRequest as exception:\n assert exception.code == 4\n self.as_connection.remove(key)", "def test_map_remove_rejects_non_sets(self):\n partition = uuid4()\n cluster = 1\n TestQueryUpdateModel.objects.create(\n partition=partition,\n cluster=cluster,\n text_map={\"foo\": '1', 
\"bar\": '2'}\n )\n with self.assertRaises(ValidationError):\n TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update(\n text_map__remove=[\"bar\"]\n )", "def test_delete(self):\n mute_map = MutableMap(**VALUE)\n del mute_map.str_val\n del mute_map['dict_val']\n\n assert not mute_map.get('str_val')\n assert not mute_map.get('dict_val')", "def delete(self, key):", "def delete(self, key):\n self.map.pop(key, None)", "def discard(m: MutableMapping[KT, VT], key: KT) -> None:\n try:\n del m[key]\n except KeyError:\n pass", "def convert_nulls(dic, null_value):\n for key in dic.iterkeys():\n if dic[key] is None:\n dic[key] = null_value", "def test_null_update_deletes_column(self):\r\n partition = uuid4()\r\n for i in range(5):\r\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\r\n\r\n # sanity check\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == i\r\n assert row.text == str(i)\r\n\r\n # perform update\r\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(text=None)\r\n\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == i\r\n assert row.text == (None if i == 3 else str(i))", "def test_no_update(self):\r\n c = SetUpdateClause('s', {1, 2}, previous={1, 2})\r\n c._analyze()\r\n c.set_context_id(0)\r\n\r\n self.assertIsNone(c._assignments)\r\n self.assertIsNone(c._additions)\r\n self.assertIsNone(c._removals)\r\n\r\n self.assertEqual(c.get_context_size(), 0)\r\n self.assertEqual(str(c), '')\r\n\r\n ctx = {}\r\n c.update_context(ctx)\r\n self.assertEqual(ctx, {})", "def delete(self, key):\n return None", "def test_null_update_deletes_column(self):\n partition = uuid4()\n for i in range(5):\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\n\n # sanity check\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, str(i))\n\n # perform update\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(text=None)\n\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, None if i == 3 else str(i))", "def _del(self, entry):\n entry.key = dummy\n entry.value = None\n self.used -= 1", "def _remove_copyset(mapping: MutableMapping[T, CopySet['Entity']], key: T, ent: 'Entity') -> None:\n copyset = mapping.get(key, None)\n if copyset is not None:\n copyset.discard(ent)\n if not copyset:\n del mapping[key]", "def testDeletingUnknownKey(self):\n\n memcache.delete('unknown')", "def __delitem__(self, key):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject)\n q = q.filter(PAW2_DBObject.key == key)\n assert q.delete(synchronize_session=False) == 1\n session.commit()", "def setnoempty(self, key, value):\r\n if value:\r\n self[key] = value", "def _delete_null_columns(self):\r\n ds = DeleteStatement(self.column_family_name)\r\n deleted_fields = False\r\n for _, v in self.instance._values.items():\r\n col = v.column\r\n if v.deleted:\r\n ds.add_field(col.db_field_name)\r\n deleted_fields = True\r\n elif isinstance(col, Map):\r\n uc = MapDeleteClause(col.db_field_name, v.value, v.previous_value)\r\n if uc.get_context_size() > 0:\r\n ds.add_field(uc)\r\n deleted_fields = 
True\r\n\r\n if deleted_fields:\r\n for name, col in self.model._primary_keys.items():\r\n ds.add_where_clause(WhereClause(\r\n col.db_field_name,\r\n EqualsOperator(),\r\n col.to_database(getattr(self.instance, name))\r\n ))\r\n self._execute(ds)", "def delete(self,key):\n\n pass", "def test_empty_key(mmap):\n kwargs = {} if mmap == -1 else {'mmap': mmap}\n\n fp = _tempfile.TemporaryFile()\n try:\n cdb = _cdbx.CDB.make(fp, **kwargs)\n for _ in range(10):\n cdb.add(\"\", \"\")\n cdb = cdb.commit()\n assert len(cdb) == 1\n finally:\n fp.close()", "def remove(self, key):", "def remove(self, key):\n h = key%self.m\n a = self.a\n if a[h]:\n a[h] = None", "def removeDic(dic, key):\n pass", "def remove_empty(d):\n for key in d.keys():\n if d[key] is None:\n del d[key]\n return d", "def unset_queries(self, *args):\n for k in args:\n self._query_dict.pop(k, None)" ]
[ "0.7591701", "0.64201266", "0.6304367", "0.6299304", "0.6194888", "0.6107784", "0.6074144", "0.59651446", "0.59280145", "0.5914961", "0.58507895", "0.58324605", "0.58044624", "0.57662004", "0.5711495", "0.5688181", "0.5680886", "0.56762064", "0.56690216", "0.56450295", "0.5637419", "0.5631714", "0.56293905", "0.56243634", "0.5612271", "0.5607088", "0.5601171", "0.55769277", "0.5511984", "0.55113274" ]
0.7424211
1
Test that map item removal with update(__remove=...) works PYTHON688
def test_map_update_remove(self): partition = uuid4() cluster = 1 TestQueryUpdateModel.objects.create( partition=partition, cluster=cluster, text_map={"foo": '1', "bar": '2'} ) TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update( text_map__remove={"bar"}, text_map__update={"foz": '4', "foo": '2'} ) obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) self.assertEqual(obj.text_map, {"foo": '2', "foz": '4'}) TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update( text_map__remove={"foo", "foz"} ) self.assertEqual( TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster).text_map, {} )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_map_remove_rejects_non_sets(self):\n partition = uuid4()\n cluster = 1\n TestQueryUpdateModel.objects.create(\n partition=partition,\n cluster=cluster,\n text_map={\"foo\": '1', \"bar\": '2'}\n )\n with self.assertRaises(ValidationError):\n TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update(\n text_map__remove=[\"bar\"]\n )", "def test_delete(self):\n mute_map = MutableMap(**VALUE)\n del mute_map.str_val\n del mute_map['dict_val']\n\n assert not mute_map.get('str_val')\n assert not mute_map.get('dict_val')", "def remove(self, item):\n del self._dict[item]", "def _remove_inst(inst_map, remove_id_list):\n for inst_id in remove_id_list:\n inst_map[inst_map == inst_id] = 0\n return inst_map", "def _testRemove(self):\n key = ('foo', 'bar')\n data = r'text!\\nthere'\n\n with self.cache.Lookup(key) as ref:\n self.assertFalse(ref.Exists())\n ref.AssignText(data)\n self.assertTrue(ref.Exists())\n ref.Remove()\n self.assertFalse(ref.Exists())", "def _map___delitem__(self, key):\n if not isinstance(key, self.keytype):\n raise KeyError('type of `key` should be ' + repr(self.keytype) + ' but got ' + repr(type(key)))\n if key not in self:\n raise KeyError('key not found')\n self.erase(self.find(key))\n return", "def test_map_update_none_deletes_key(self):\r\n # partition = uuid4()\r\n # cluster = 1\r\n # TestQueryUpdateModel.objects.create(\r\n # partition=partition, cluster=cluster,\r\n # text_map={\"foo\": '1', \"bar\": '2'})\r\n # TestQueryUpdateModel.objects(\r\n # partition=partition, cluster=cluster).update(\r\n # text_map__update={\"bar\": None})\r\n # obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\r\n # self.assertEqual(obj.text_map, {\"foo\": '1'})\r", "def test_delete_voltage_map_item(self):\n pass", "def remove(self, key):", "def del_map(self, event, handle, *args):\n if args:\n self.base[event].remove((handle, args))\n else:\n self.base[event] = filter(lambda ind: \n ind[0] != handle, self.base[event])", "def test_remove(self):\n sched = Schedule()\n inst_map = InstructionScheduleMap()\n\n inst_map.add(\"tmp\", 0, sched)\n inst_map.remove(\"tmp\", 0)\n self.assertFalse(inst_map.has(\"tmp\", 0))\n with self.assertRaises(PulseError):\n inst_map.remove(\"not_there\", (0,))\n self.assertFalse(\"tmp\" in inst_map.qubit_instructions(0))", "def test_remove(self):\n pass", "def remove (self, item):\n pass", "def remove(self, item):\n try:\n entry = self.set.pop(item)\n entry[-1] = self.REMOVED\n except KeyError:\n print(\"Can't remove a non-existing item\")", "def remove_item_from_map(): \n ITEM_LIST[ZERO_BASE_PLYR_POS] = int(len(ITEMTYPES) - 2) # Replaces item with the \"None\" item", "def removeObjectMap(self,fromMod,toMod):\n if self.objectMaps == None: self.loadObjectMaps()\n del self.objectMaps[(fromMod,toMod)]", "def __delitem__(self, key):\n try:\n del self._maps[0][key]\n except KeyError:\n raise KeyError(\n 'Key not found in the last mapping: {!r}'.format(key))", "def test_map_update_none_deletes_key(self):\n partition = uuid4()\n cluster = 1\n TestQueryUpdateModel.objects.create(\n partition=partition, cluster=cluster,\n text_map={\"foo\": '1', \"bar\": '2'})\n TestQueryUpdateModel.objects(\n partition=partition, cluster=cluster).update(\n text_map__update={\"bar\": None})\n obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\n self.assertEqual(obj.text_map, {\"foo\": '1'})", "def remove(self, key: int) -> None:\n if key in self.map:\n del self.map[key]", "def __delitem__(self, key):\n pass", "def 
__delitem__(self, key):\n pass", "def discard(m: MutableMapping[KT, VT], key: KT) -> None:\n try:\n del m[key]\n except KeyError:\n pass", "def delete(self, key):\n self.map.pop(key, None)", "def test_delete_saved_app_map_search(self):\n pass", "def test_size_changes_on_remove(multi_trie):\n multi_trie.remove(\"hello\")\n assert multi_trie.size() == 5", "def _del(self, entry):\n entry.key = dummy\n entry.value = None\n self.used -= 1", "def remove():", "def remove_item(self, item):\r\n\r\n for key in self._inner_dict:\r\n if item in self._inner_dict[key]:\r\n idx = self._inner_dict[key].index(item)\r\n del self._inner_dict[key][idx]", "def test_destroy_map2(self):\r\n Z = as_tensor_variable(self.rand(2, 2))\r\n A = as_tensor_variable(self.rand(2, 2))\r\n try:\r\n gemm_inplace(Z, 1.0, inplace.transpose_inplace(Z), A, 1.0)\r\n except InconsistencyError, e:\r\n if exc_message(e) == Gemm.E_z_uniq:\r\n return\r\n self.fail()", "def __delitem__(self,key):\n if key in self._register:\n del self._register[key]" ]
[ "0.71496063", "0.6998314", "0.6717322", "0.6676355", "0.6534673", "0.6512091", "0.645768", "0.64257306", "0.64203507", "0.6369451", "0.6358273", "0.6289909", "0.62580013", "0.6257078", "0.6239999", "0.62256265", "0.6181995", "0.61457175", "0.6121318", "0.61211246", "0.61211246", "0.6118506", "0.61156756", "0.6096097", "0.60604674", "0.60402405", "0.6020189", "0.6016021", "0.60125124", "0.6012505" ]
0.7701655
0
Map item removal requires a set to match the CQL API PYTHON688
def test_map_remove_rejects_non_sets(self): partition = uuid4() cluster = 1 TestQueryUpdateModel.objects.create( partition=partition, cluster=cluster, text_map={"foo": '1', "bar": '2'} ) with self.assertRaises(ValidationError): TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update( text_map__remove=["bar"] )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove(self, item):\n try:\n entry = self.set.pop(item)\n entry[-1] = self.REMOVED\n except KeyError:\n print(\"Can't remove a non-existing item\")", "def _remove_copyset(mapping: MutableMapping[T, CopySet['Entity']], key: T, ent: 'Entity') -> None:\n copyset = mapping.get(key, None)\n if copyset is not None:\n copyset.discard(ent)\n if not copyset:\n del mapping[key]", "def _remove_inst(inst_map, remove_id_list):\n for inst_id in remove_id_list:\n inst_map[inst_map == inst_id] = 0\n return inst_map", "def remove_item_from_map(): \n ITEM_LIST[ZERO_BASE_PLYR_POS] = int(len(ITEMTYPES) - 2) # Replaces item with the \"None\" item", "def remove(self, item):\n try:\n self._data.remove(item)\n except ValueError as exc:\n raise KeyError from exc\n else:\n self.__log__.append(SetRemove(value=item))", "def clean_up_map(self):\n self.items = [i for i in self.items if i.quantity != 0]", "def test_map_update_remove(self):\n partition = uuid4()\n cluster = 1\n TestQueryUpdateModel.objects.create(\n partition=partition,\n cluster=cluster,\n text_map={\"foo\": '1', \"bar\": '2'}\n )\n TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update(\n text_map__remove={\"bar\"},\n text_map__update={\"foz\": '4', \"foo\": '2'}\n )\n obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\n self.assertEqual(obj.text_map, {\"foo\": '2', \"foz\": '4'})\n\n TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update(\n text_map__remove={\"foo\", \"foz\"}\n )\n self.assertEqual(\n TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster).text_map,\n {}\n )", "def remove(self, key):", "def remove(self, item):\n del self._dict[item]", "def _map___delitem__(self, key):\n if not isinstance(key, self.keytype):\n raise KeyError('type of `key` should be ' + repr(self.keytype) + ' but got ' + repr(type(key)))\n if key not in self:\n raise KeyError('key not found')\n self.erase(self.find(key))\n return", "def delete_set(self, item): # TODO test\n tree = item.parent\n item_label = item.parent_node\n tree.remove_node(item)\n tree.remove_node(item_label)\n self.exercise.sets.remove(item.set)\n print(\"delete set\")", "def updateDict(self,strSet):\n\tself.createAdjList(strSet,\"remove\")", "def delete(self, mapitem_id: int):\n pass", "def del_map(self, event, handle, *args):\n if args:\n self.base[event].remove((handle, args))\n else:\n self.base[event] = filter(lambda ind: \n ind[0] != handle, self.base[event])", "def remove():", "def removeFromSet(_session, _el, _set):\n it = _session.create_iterator(_session.sc_constraint_new(sc_constants.CONSTR_3_f_a_f,\n _set,\n sc.SC_ARC,\n _el), True)\n while not it.is_over():\n _session.erase_el(it.value(1))\n it.next()", "def remove_item(self, item):\r\n\r\n for key in self._inner_dict:\r\n if item in self._inner_dict[key]:\r\n idx = self._inner_dict[key].index(item)\r\n del self._inner_dict[key][idx]", "def delete(self, item):\r\n self.fetch()\r\n t = self.make_item_tuple(item)\r\n changed = False\r\n while t in self.data:\r\n self.data.remove(t)\r\n changed = True\r\n \r\n if changed:\r\n query_cache.set(self.iden, self.data)", "def test_delete(self):\n mute_map = MutableMap(**VALUE)\n del mute_map.str_val\n del mute_map['dict_val']\n\n assert not mute_map.get('str_val')\n assert not mute_map.get('dict_val')", "def remove(self) -> None:\n self.map.remove_ent(self)", "def removeObjectMap(self,fromMod,toMod):\n if self.objectMaps == None: self.loadObjectMaps()\n del self.objectMaps[(fromMod,toMod)]", "def 
remove_from_multidict(d: MultiDict, key: str, item: typing.Any):\n # works by popping all, removing, then re-adding into\n i = d.popall(key, [])\n if item in i:\n i.remove(item)\n\n for n in i:\n d.add(key, n)\n\n return d", "def remove(self, key: int) -> None:\n if key in self.map:\n del self.map[key]", "def __delitem__(self, key):\n try:\n del self._maps[0][key]\n except KeyError:\n raise KeyError(\n 'Key not found in the last mapping: {!r}'.format(key))", "def removeItem(*args):", "def removeItem(*args):", "def delete(aMap, key):\n\t#get the bucket that they key is in, and sets it to bucket\n\tbucket = get_bucket(aMap, key)\n\n\tfor i in xrange(len(bucket)):\n\t\tk, v = bucket[i]\n\t\tif key == k:\n\t\t\tdel bucket[i]\n\t\t\t#we can break here, since we know there can be only one key/value pair\n\t\t\tbreak", "def discard(m: MutableMapping[KT, VT], key: KT) -> None:\n try:\n del m[key]\n except KeyError:\n pass", "def remove(self):", "def delete(self, key):\n self.map.pop(key, None)" ]
[ "0.66514295", "0.6571657", "0.6545378", "0.6522333", "0.65185094", "0.6443647", "0.6430008", "0.63862544", "0.63650626", "0.63025415", "0.6232438", "0.6213155", "0.6206721", "0.6181063", "0.6170815", "0.61475635", "0.61284", "0.611241", "0.61095846", "0.6069405", "0.60679823", "0.6037412", "0.60348684", "0.60002404", "0.5995804", "0.5995804", "0.5992119", "0.5959798", "0.5952544", "0.59395736" ]
0.6858619
0
Test to ensure that an extra DELETE is not sent if an object is read from the DB with a None value 3.9 PYTHON719 only three queries are executed, the first one for inserting the object, the second one for reading it, and the third one for updating it object_mapper
def test_an_extra_delete_is_not_sent(self): partition = uuid4() cluster = 1 TestQueryUpdateModel.objects.create( partition=partition, cluster=cluster) obj = TestQueryUpdateModel.objects( partition=partition, cluster=cluster).first() self.assertFalse({k: v for (k, v) in obj._values.items() if v.deleted}) obj.text = 'foo' obj.save() #execute_count will check the execution count and #assert no more calls than necessary where made
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_edge_case_with_write_concern_0_return_None(self):\n p1 = self.Person(name=\"User Z\", age=20).save()\n del_result = p1.delete(w=0)\n assert del_result is None", "def test_get(self):\n objects = self.populate()\n for obj in objects:\n found = models.storage.get(type(obj), obj.id)\n self.assertIs(found, obj)\n for obj in objects:\n obj.delete()\n found = models.storage.get(type(obj), obj.id)\n self.assertIsNone(found)", "def test_get_none(self):\n models.storage.close()\n models.storage = models.engine.db_storage.DBStorage()\n models.storage.reload()\n obj = self.populate()\n\n found = models.storage.get(type(obj[0]), None)\n self.assertEqual(found, None)\n with self.assertRaises(KeyError):\n models.storage.get(None, obj[0].id)\n with self.assertRaises(KeyError):\n models.storage.get(None, None)", "def test_unsaved(self):\n m = mapper(Order, orders, properties={\n 'description':deferred(orders.c.description)\n })\n\n sess = create_session()\n o = Order()\n sess.save(o)\n o.order_id = 7\n def go():\n o.description = \"some description\"\n self.assert_sql_count(testing.db, go, 0)", "def test_null_update_deletes_column(self):\n partition = uuid4()\n for i in range(5):\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\n\n # sanity check\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, str(i))\n\n # perform update\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(text=None)\n\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, None if i == 3 else str(i))", "def test_delete_record(self):\n pass", "def test_delete_from_database_one_item(session):\n instance = Foo()\n id = persist(session, instance) # store instance in database\n\n stored_instance = get_stored_instance_by_id(session, Foo, id)\n assert stored_instance is not None # instance is present in database\n\n delete_from_database(session, instance) # delete instance from database\n\n with pytest.raises(NoResultFound): # instance no longer present in database\n stored_instance = get_stored_instance_by_id(session, Foo, id)", "def test_null_update_deletes_column(self):\r\n partition = uuid4()\r\n for i in range(5):\r\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\r\n\r\n # sanity check\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == i\r\n assert row.text == str(i)\r\n\r\n # perform update\r\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(text=None)\r\n\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == i\r\n assert row.text == (None if i == 3 else str(i))", "def test_data_object_del(self):\n pass", "def test_delete(self):\n self.assertFalse(self.user1.ad_deleted)\n self.assertTrue(self.user1.active)\n url = '/api/users/{}/'.format(self.user1.ad_guid)\n data = {'Deleted': True}\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)\n user = DepartmentUser.objects.get(pk=self.user1.pk) # Refresh from db\n self.assertTrue(user.ad_deleted)\n self.assertFalse(user.active)\n self.assertTrue(user.in_sync)\n # Also delete a second object, to 
check for silly 'empty string' collisions.\n url = '/api/users/{}/'.format(self.user2.ad_guid)\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)", "def test_create_get_delete_update_node(self):\n node_dict_1 = {\n 'host_name': 'abc',\n 'local_router_id': '1.1.1.1',\n 'as_num': 100,\n 'bgpls_id': '0.0.0.0',\n 'igp_id': '0.0.0.0'\n }\n node_dict_2 = {\n 'host_name': 'def',\n 'local_router_id': '2.2.2.2',\n 'as_num': 100,\n 'bgpls_id': '0.0.0.0',\n 'igp_id': '0.0.0.0'\n }\n\n # create two objects\n node1 = Node(**node_dict_1)\n node2 = Node(**node_dict_2)\n Node.create_object(self.database, node1.__dict__)\n Node.create_object(self.database, node2.__dict__)\n self.assertEqual(2, Node.count(self.database))\n\n # get one object\n node1 = Node.get_object(self.database, host_name='abc')\n self.assertEqual(node_dict_1['local_router_id'], node1.get('local_router_id'))\n\n # get objects\n nodes = Node.get_objects(self.database, as_num=100)\n self.assertEqual(2, len(nodes))\n\n # update one object\n self.assertEqual(0, Node.count(self.database, local_router_id='3.3.3.3'))\n node_db_obj = Node.update_object(\n self.database, {'local_router_id': '3.3.3.3'}, host_name='abc')\n self.assertEqual('3.3.3.3', node_db_obj.get('local_router_id'))\n self.assertEqual(1, Node.count(self.database, local_router_id='3.3.3.3'))\n\n # update more than objects\n self.assertEqual(2, Node.count(self.database, as_num=100))\n update_count = Node.update_objects(\n self.database, {'as_num': 200}, igp_id='0.0.0.0')\n self.assertEqual(2, update_count)\n self.assertEqual(2, Node.count(self.database, as_num=200))\n\n # delete objects\n Node.delete_object(self.database, host_name='abc')\n self.assertEqual(1, Node.count(self.database))", "def test_static_deletion(self):\n StaticDeleteModel.create(example_id=5, example_clust=5, example_static2=1)\n sdm = StaticDeleteModel.filter(example_id=5).first()\n self.assertEqual(1, sdm.example_static2)\n sdm.update(example_static2=None)\n self.assertIsNone(sdm.example_static2)", "def test_data_object_untrash(self):\n pass", "def test_object_del(self):\n obj0 = Base()\n del obj0\n obj1 = Base()\n self.assertEqual(obj1.id, 2)", "def test_map_update_none_deletes_key(self):\r\n # partition = uuid4()\r\n # cluster = 1\r\n # TestQueryUpdateModel.objects.create(\r\n # partition=partition, cluster=cluster,\r\n # text_map={\"foo\": '1', \"bar\": '2'})\r\n # TestQueryUpdateModel.objects(\r\n # partition=partition, cluster=cluster).update(\r\n # text_map__update={\"bar\": None})\r\n # obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\r\n # self.assertEqual(obj.text_map, {\"foo\": '1'})\r", "def test_delete(self):\n person = Person('test_person_b')\n person.delete()\n with database() as db:\n results = db.query(\"SELECT * FROM persons WHERE person_name = 'test_person_b'\")\n self.assertEqual(results, [])", "def test_delete_data(self):\n data = Data.objects.create(\n name='Test data',\n contributor=self.user,\n process=self.proc,\n )\n\n data.output = {'json_field': {'foo': 'bar'}}\n data.status = Data.STATUS_DONE\n data.save()\n\n self.assertEqual(Storage.objects.count(), 1)\n\n data.delete()\n self.assertEqual(Storage.objects.count(), 0)", "def test_delete_without_any_where_args(self):\r\n with self.assertRaises(query.QueryException):\r\n TestModel.objects(attempt_id=0).delete()", "def test_delete(self):\n\n\t\titem_id = mock_item()[0]\n\t\tmodels.delete(item_id)\n\n\t\titem = 
models.item(item_id)\n\t\tself.assertIsNone(item)", "def test_deletion(self):\n self.assertEqual(self.store.query(BatchManholePowerup).count(), 0)", "def test_that_updating_a_deleted_aggregate_raises_object_not_found_error(\n self, test_domain\n ):\n\n person = test_domain.repository_for(Person)._dao.create(\n first_name=\"Johnny\", last_name=\"John\"\n )\n test_domain.repository_for(Person)._dao.delete(person)\n with pytest.raises(ObjectNotFoundError):\n test_domain.repository_for(Person)._dao.update(person, {\"age\": 10})", "def test_data_object_del_all(self):\n pass", "def test_updates_to_none(self):\r\n m = TestMapModel.create(int_map={1: uuid4()})\r\n m.int_map = None\r\n m.save()\r\n\r\n m2 = TestMapModel.get(partition=m.partition)\r\n assert m2.int_map == {}", "def test_delete(self):\n self.Person(name=\"User A\", age=20).save()\n self.Person(name=\"User B\", age=30).save()\n self.Person(name=\"User C\", age=40).save()\n\n assert self.Person.objects.count() == 3\n\n self.Person.objects(age__lt=30).delete()\n assert self.Person.objects.count() == 2\n\n self.Person.objects.delete()\n assert self.Person.objects.count() == 0", "def test_nondependent_object_get(self):\n manager = ImporterManager(importer=UserImporter())\n for row,name in enumerate(self.usernames):\n manager.update_kvs(field_name='username',value=name,row=row)\n\n manager.get_available_rows()\n for i in range(self.n_objs):\n objs: List[RecordData] = manager.get_objs_and_meta(i) #: Returns a list of objects only if manytomany\n self.assertEqual(objs[0].available, True)\n self.assertIsNotNone(objs[0].object)\n self.assertIsInstance(objs[0].object, User)\n self.assertIsNotNone(objs[0].query)\n\n del manager", "def test_update_no_pk(self):\n track = Track(artist='Artist', album='Album', title='Title')\n with self.assertRaises(Exception):\n track.update(self.app.db, self.app.curs)\n self.assertEqual(self.get_track_count(), 0)", "def test_delete_from_database_multiple_items(session):\n instance1 = Foo()\n instance2 = Foo()\n id1 = persist(session, instance1)\n id2 = persist(session, instance2)\n\n delete_from_database(session, [instance1, instance2])\n\n with pytest.raises(NoResultFound):\n get_stored_instance_by_id(session, Foo, id1)\n with pytest.raises(NoResultFound):\n get_stored_instance_by_id(session, Foo, id2)", "def test_partial_nondependent_object_get(self):\n MISSING_INDEX = 2\n User.objects.filter(username=self.usernames[MISSING_INDEX]).delete()\n\n manager = ImporterManager(importer=UserImporter())\n for row,name in enumerate(self.usernames):\n manager.update_kvs(field_name='username',value=name,row=row)\n\n manager.get_available_rows()\n for i in range(self.n_objs):\n objs: List[RecordData] = manager.get_objs_and_meta(i) #: Returns a list of objects only if manytomany\n if i==MISSING_INDEX:\n self.assertEqual(objs[0].available, False)\n self.assertIsNone(objs[0].object)\n self.assertIsNotNone(objs[0].query)\n continue\n\n self.assertEqual(objs[0].available, True)\n self.assertIsNotNone(objs[0].object)\n self.assertIsInstance(objs[0].object, User)\n self.assertIsNotNone(objs[0].query)\n\n del manager", "def test_update_no_pk(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120)\n with self.assertRaises(Exception):\n album.update(self.app.db, self.app.curs)\n self.assertEqual(self.get_album_count(), 0)", "def test_delete_concept(self):\n\n # Create a new `Concept` record.\n obj_id, refr = create_concept(dal=self.dal)\n\n self.assertEqual(obj_id, 1)\n\n # Delete the new record.\n 
self.dal.delete(Concept, obj_id)\n\n # (Attempt to) retrieve the deleted record.\n obj = self.dal.get(Concept, obj_id) # type: Concept\n\n self.assertIsNone(obj)" ]
[ "0.6682354", "0.64057285", "0.6280417", "0.6232558", "0.6079348", "0.6004791", "0.60003245", "0.595957", "0.59452885", "0.5933317", "0.5926694", "0.5922889", "0.5921724", "0.590885", "0.59038156", "0.59009415", "0.5898185", "0.5885365", "0.58850574", "0.58849925", "0.5869666", "0.586526", "0.5851763", "0.5825548", "0.5813523", "0.5806358", "0.5803678", "0.57846886", "0.5783178", "0.5780592" ]
0.6737397
0
Overrides get function then adds a model of type Restaurant to the view whose id = restaurant_id
def get(self, request, pk, *args, **kwargs): self.restaurant = get_object_or_404(Restaurant, id=pk) self.page_title = "{} Information".format(self.restaurant.name) def get_list_or_none(klass, *args, **kwargs): queryset = _get_queryset(klass) obj_list = list(queryset.filter(*args, **kwargs)) if obj_list: return obj_list return [] self.dishes = get_list_or_none(Dish, restaurant=self.restaurant) self.orders = get_list_or_none(Order, restaurant=self.restaurant) return super().get(request, *args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, request, restaurant_id, *args, **kwargs):\n self.restaurant = get_object_or_404(Restaurant, id=restaurant_id)\n return super().get(request, *args, **kwargs)", "def get_random_restaurant(self, request, **kwargs):\n restaurant = Restaurant.objects.order_by(\n '?'\n ).select_related(\n 'address'\n ).prefetch_related(\n 'employees'\n ).first()\n serializer = RestaurantFullInfoSerializer(restaurant)\n return Response(serializer.data)", "def restaurants_new():\n # If the user isn't logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n\n if request.method == 'POST':\n if len(request.form['name']) > 0:\n new_restaurant = Restaurant(name=request.form['name'],\n address=request.form['address'],\n phone=request.form['phone'],\n web=helper.check_restaurant_URL(request.form['web']),\n description=request.form['description'],\n user_id=login_session['user_id'])\n session.add(new_restaurant)\n session.commit()\n flash(\"New restaurant created - {}\".format(new_restaurant.name))\n tag_line = request.form['tag_line']\n tag_list = tag_line.split(',')\n for tag in tag_list:\n helper.add_tag_if_not_exists(tag, new_restaurant.id)\n return redirect(url_for('restaurants_page'))\n else:\n flash(\"Incorrect Restaurant details - Please include a name!\")\n\n user_info = helper.get_user_if_exists(login_session)\n return render_template('newrestaurant.html', user_info=user_info)", "def get_restaurant(id):\r\n with current_app.app_context():\r\n if current_app.config[\"USE_MOCKS\"]:\r\n id -= 1 # restaurant IDs starting by 1\r\n if 0 <= id < len(restaurants):\r\n return restaurants[id]\r\n else:\r\n return None\r\n else:\r\n return get_from(current_app.config[\"REST_SERVICE_URL\"]+\"/restaurants/\"+str(id))", "def __repr__(self):\n return f'<Restaurant id: {self.id}>'", "def restaurantMenuItemNew(restaurant_id):\n try:\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n if request.method == 'POST':\n if request.form['name']:\n newItem = MenuItem(name=request.form['name'], description=request.form[\n 'description'], price=request.form['price'], course=request.form['course'], restaurant_id=restaurant_id)\n session.add(newItem)\n session.commit()\n\n flash('Menu Item Created', 'menu')\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))\n else:\n return render_template('menuItemNew.html', restaurant=restaurant)\n\n except exc.NoResultFound:\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))", "def get(self, request, *args, **kwargs):\n self.object = RUT.objects.get(pk=kwargs['pk'])\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n ot_linea_form = OT_LineaFormSet(instance=self.object)\n return self.render_to_response(\n self.get_context_data(form=form,\n ot_linea_form=ot_linea_form))", "def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = User.object.get(id=user_data.id)\n restaurant, created = Restaurant.objects.update_or_create(user=user, data=validated_data)\n return restaurant", "def add_new_review(restaurant_id):\n # If the user isn't logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n\n if request.method == 'POST':\n post = request.get_json()\n if 'username' not in login_session:\n new_review = Reviews(reviewer_name='anonymous',\n review=post.get('review'),\n stars=post.get('stars'),\n restaurant_id=restaurant_id,\n time=datetime.utcnow())\n else:\n 
new_review = Reviews(reviewer_name=login_session['username'],\n review=post.get('review'),\n stars=post.get('stars'),\n restaurant_id=restaurant_id,\n time=datetime.utcnow())\n session.add(new_review)\n session.commit()\n\n return redirect(url_for('restaurants_page'))", "def get_instance(self, *args, **kwargs):\n self.pizza = None\n pk = self.kwargs.get('pk', None)\n if pk:\n try:\n self.pizza = Pizza.objects.get(pk=pk)\n except ObjectDoesNotExist:\n raise Http404(\"No %(verbose_name)s found matching the query\" %\n {'verbose_name': Pizza._meta.verbose_name})", "def _update_restaurant_info(self):\n Restaurant().update(self._entity_id, self._entity_info)", "def restaurants_edit(restaurant_id):\n # If the user isn't logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n # Find the restaurant\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n if request.method == 'POST':\n # Only edit if the entry was re-written\n if len(request.form['address']) > 0:\n restaurant.address = request.form['address']\n if len(request.form['phone']) > 0:\n restaurant.phone = request.form['phone']\n if len(request.form['web']) > 0:\n restaurant.web = helper.check_restaurant_URL(request.form['web'])\n if len(request.form['tag_line']) > 0:\n tag_line = request.form['tag_line']\n tag_list = tag_line.split(',')\n helper.delete_restaurant_tag_pairs(restaurant.id)\n for tag in tag_list:\n helper.add_tag_if_not_exists(tag, restaurant.id)\n if len(request.form['description']) > 0:\n restaurant.description = request.form['description']\n\n restaurant.last_update = datetime.utcnow()\n\n session.add(restaurant)\n session.commit()\n flash(\"Restaurant {} edited!\".format(restaurant.name))\n return redirect(url_for('restaurants_page'))\n else:\n # Get user info if the user is signed in to render edit form\n user_info = helper.get_user_if_exists(login_session)\n tag_rest_list = session.query(RestaurantTags).filter_by(restaurant_id=restaurant.id).all()\n tag_line = ''\n # Create a tag line - by compiling the string tag_name for each tag\n for pair in tag_rest_list:\n tag = session.query(Tags).filter_by(id=pair.tag_id).first()\n tag_line += tag.tag_name + ', '\n return render_template('editrestaurant.html',\n restaurant=restaurant,\n tag_line=tag_line,\n user_info=user_info)", "def test_get_restaurant_by_id(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Greco'\n db.session.add(Restaurant(name=name, creator='[email protected]'))\n db.session.commit()\n\n # Since this is a freshly created table, the first id should be 1\n resp = self.test_client.get(self.API_BASE + '/1', headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['restaurant']['name'], name)", "def get(self, request, *args, **kwargs):\n\n self.get_instance(args, kwargs)\n self.pizza_form = PizzaForm(instance = self.pizza)\n self.topping_usage_formset = ToppingUsageFormSet(instance=self.pizza)\n return self.render_to_response(self.get_context_data())", "def menu_item_new(restaurant_id):\n # If the user isn't logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n img_id = 0\n if request.method == 'POST':\n if 'file' in request.files:\n print(\"File found\")\n img_id = 
helper.create_new_image_if_not_exists(file=request.files['file'],\n title=request.form['img_name'])\n new_item = MenuItem(name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'],\n course=request.form['course'],\n likes=0,\n dislikes=0,\n restaurant_id=restaurant_id,\n user_id=login_session['user_id'],\n image_id=img_id)\n session.add(new_item)\n session.commit()\n flash(\"New Menu Item {} created!\".format(new_item.name))\n return redirect(url_for('restaurant_menu', restaurant_id=restaurant_id))\n else:\n user_info = helper.get_user_if_exists(login_session)\n return render_template('newmenuitem.html', restaurant=restaurant, user_info=user_info)", "def collect_data(self, data: Restaurant) -> Restaurant:\n return data", "def food_item(request, food_id):\n\n food = get_object_or_404(Nutrition, pk=food_id)\n\n context = {\n 'food': food,\n }\n\n return render(request, 'nutrition/food.html', context)", "def newMenuItem(restaurant_id):\n\n if 'access_token' not in flask_session:\n return logInRedirect()\n restaurant = session.query(Restaurant).filter_by(id = restaurant_id).first()\n user_id = getUserId(flask_session['email'],flask_session['google_plus_id'])\n if not restaurant.user_id == user_id:\n flash(\"Only restaurant owners can add new items.\")\n return redirect(url_for(\"publicMenu\",restaurant_id = restaurant_id))\n\n if request.method == \"POST\":\n new_name = request.form['new_name']\n print \"\\nnewMenuItem POST triggered, name is: \", new_name\n newMenuItem = MenuItem( name=new_name,\n restaurant_id=restaurant.id )\n session.add(newMenuItem)\n session.commit()\n flash( \"new item '\" + new_name + \"' created!\")\n print \"POST worked!\"\n return redirect(url_for(\"showMenu\", restaurant_id=restaurant.id))\n\n else:\n return render_template('newMenuItem.html', restaurant = restaurant)", "def edit_restaurant(restaurant_id):\n user_id = login_session['user_id']\n r = read_restaurants(restaurant_id, user_id)\n if r[1] is True: # Means if user is owner\n if request.method == 'GET':\n return render_template('restaurants/editrestaurant.html',\n restaurant=r[0][0])\n elif request.method == 'POST':\n # Got post request -> First we get the request arguemnts\n name = request.form['name']\n address = request.form['address']\n city = request.form['city']\n state = request.form['state']\n zipCode = request.form['zipCode']\n # Next we do the db edit\n update_restaurant(restaurant_id, name, address,\n city, state, zipCode)\n # Finally we return the success html\n flash(\"Edited your restaurant\")\n return render_template(\"submitted.html\")\n else:\n return \"Invalid http\"\n else:\n flash(\"You need to be the owner of the restaurant to edit\")\n return redirect(url_for('site.show_restaurants',\n restaurant_id=restaurant_id))", "def retrieve(self, request, pk=None):\n\n\n \n\n\n try:\n # `pk` is a parameter to this function, and\n # Django parses it from the URL route parameter\n # http://localhost:8000/Posts/2\n #\n # The `2` at the end of the route becomes `pk`\n post = Post.objects.get(pk=pk)\n reactions = Reaction.objects.all()\n\n # Creates an empty list for reactions custom property set in model, and then filters through postReactions to provide objects with a\n # key/value pair of reaction label/number of that reaction the post has \n\n post.reactions=[]\n\n for reaction in reactions:\n number_of_reactions = PostReaction.objects.filter(post=post, reaction=reaction).count()\n post.reactions.append({reaction.label: number_of_reactions})\n\n 
associated_tags=Tag.objects.filter(related_post__post=post)\n user = RareUser.objects.get(user=request.auth.user)\n\n all_tags=serializer=TagSerializer(associated_tags, many=True, context={'request',request})\n my_post=serializer = PostSerializer(post, context={'request': request})\n \n single_post={}\n single_post['post']=my_post.data\n single_post['tags']=all_tags.data\n if user == post.user:\n single_post['myPosts']=True \n\n return Response(single_post)\n except Exception as ex:\n return HttpResponseServerError(ex)", "def test_get_restaurant_by_id_none(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Greco'\n db.session.add(Restaurant(name=name, creator='[email protected]'))\n db.session.commit()\n\n # Since this is a freshly created table, the only id should be 1.\n # id 2 does not exist.\n resp = self.test_client.get(self.API_BASE + '/2', headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 404)", "def register_restaurant(self, id, location, meals_list):\r\n r = Restaurant(id)\r\n r.set_location(location)\r\n r.set_meals_offered_list(meals_list)\r\n self._restaurants_list.append(r)", "def newRestaurantPage():\n if 'username' not in login_session:\n return redirect('/login')\n if request.method == 'POST':\n res_name = request.form['res_name']\n user_id = login_session['user_id']\n if res_name:\n db_methods.addNewRestaurant(res_name, user_id)\n time.sleep(0.1)\n return redirect(\"/restaurants\")\n else:\n error = \"You need to enter the name of the restaurant you want to add.\"\n return render_template('newrestaurant.html', error = error)\n else:\n return render_template('newrestaurant.html')", "def get(self, request, *args, **kwargs):\n self.object = Presupuesto.objects.get(pk=kwargs['pk'])\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n instrumento_linea_form = Instrumento_LineaFormSet(instance=self.object)\n return self.render_to_response(\n self.get_context_data(form=form,\n instrumento_linea_form=instrumento_linea_form))", "def get(self, request, *args, **kwargs):\n self.object = OT.objects.get(pk=kwargs['pk'])\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n factura_form = Factura_LineaFormSet(instance=self.object)\n remito_form = Remito_LineaFormSet(instance=self.object)\n ot_linea_form = OT_LineaFormSet(instance=self.object)\n return self.render_to_response(\n self.get_context_data(form=form,\n factura_form=factura_form,\n remito_form=remito_form,\n ot_linea_form=ot_linea_form))", "def get(self, request, *args, **kwargs):\n self.object = SI.objects.get(pk=kwargs['pk'])\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n tarea_linea_form = Tarea_LineaFormSet(instance=self.object)\n return self.render_to_response(\n self.get_context_data(form=form,\n tarea_linea_form=tarea_linea_form))", "def get_recipe(self, _id):\n raise NotImplementedError()", "def Restaurant_get_info() -> Restaurant:\r\n name = input(\"Please enter the restaurant's name: \")\r\n cuisine = input(\"Please enter the kind of food served: \")\r\n phone = input(\"Please enter the phone number: \")\r\n menu = menu_enter()\r\n return Restaurant(name, cuisine, phone, menu)", "def get(self, request, *args, **kwargs):\n self.object = SOT.objects.get(pk=kwargs['pk'])\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n ot_linea_form = OT_LineaFormSet(instance=self.object)\n return self.render_to_response(\n self.get_context_data(form=form,\n 
ot_linea_form=ot_linea_form))", "def review_add(request):\n result = {}\n\n u = request.user\n\n p = Product.objects.get_by_sku(request.POST['sku'])\n\n if p is None:\n result[\"result\"] = '0'\n elif TransactionLineItem.objects.filter(transaction__party=u, product=p).count() > 0:\n # need to check if I bought this item\n\n r, created = Review.objects.get_or_create(reviewer=u, product=p)\n r.content =request.POST['content']\n r.rating=int(request.POST['rating'])\n\n # reply to review request\n rto = request.POST.get('reply_to', None)\n if rto:\n rev_request = ReviewRequest.objects.get(id=int(rto))\n r.reply_to.add(rev_request)\n # change wish item review status to review=2\n for w in Wishlist.objects.filter(product=p, party=rev_request.requester):\n w.review = Wishlist.REVIEW_RESPONDED\n w.save()\n \n r.public = bool(request.POST['public'])\n r.save() \n\n # add a feed\n f = Feed(actor=u, action=Feed.REVIEWED, product=p) \n f.save()\n \n result[\"result\"] = str(r.id)\n else:\n result['result'] = '-1'\n\n return JSONHttpResponse(result)" ]
[ "0.82593495", "0.6128801", "0.5976258", "0.59254146", "0.57850933", "0.577613", "0.5775901", "0.57478505", "0.5729911", "0.5692035", "0.56844246", "0.56034964", "0.5585453", "0.5549611", "0.54296017", "0.5421414", "0.5405741", "0.5395842", "0.5391386", "0.5368653", "0.53548074", "0.53489804", "0.53412545", "0.5335105", "0.53073496", "0.52991784", "0.52829754", "0.526443", "0.5238383", "0.5232462" ]
0.6749007
1
Overrides get function then adds a model of type Restaurant to the view whose id = restaurant_id
def get(self, request, restaurant_id, *args, **kwargs): self.restaurant = get_object_or_404(Restaurant, id=restaurant_id) return super().get(request, *args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, request, pk, *args, **kwargs):\n self.restaurant = get_object_or_404(Restaurant, id=pk)\n self.page_title = \"{} Information\".format(self.restaurant.name)\n\n def get_list_or_none(klass, *args, **kwargs):\n queryset = _get_queryset(klass)\n obj_list = list(queryset.filter(*args, **kwargs))\n if obj_list:\n return obj_list\n return []\n\n self.dishes = get_list_or_none(Dish, restaurant=self.restaurant)\n self.orders = get_list_or_none(Order, restaurant=self.restaurant)\n return super().get(request, *args, **kwargs)", "def get_random_restaurant(self, request, **kwargs):\n restaurant = Restaurant.objects.order_by(\n '?'\n ).select_related(\n 'address'\n ).prefetch_related(\n 'employees'\n ).first()\n serializer = RestaurantFullInfoSerializer(restaurant)\n return Response(serializer.data)", "def restaurants_new():\n # If the user isn't logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n\n if request.method == 'POST':\n if len(request.form['name']) > 0:\n new_restaurant = Restaurant(name=request.form['name'],\n address=request.form['address'],\n phone=request.form['phone'],\n web=helper.check_restaurant_URL(request.form['web']),\n description=request.form['description'],\n user_id=login_session['user_id'])\n session.add(new_restaurant)\n session.commit()\n flash(\"New restaurant created - {}\".format(new_restaurant.name))\n tag_line = request.form['tag_line']\n tag_list = tag_line.split(',')\n for tag in tag_list:\n helper.add_tag_if_not_exists(tag, new_restaurant.id)\n return redirect(url_for('restaurants_page'))\n else:\n flash(\"Incorrect Restaurant details - Please include a name!\")\n\n user_info = helper.get_user_if_exists(login_session)\n return render_template('newrestaurant.html', user_info=user_info)", "def get_restaurant(id):\r\n with current_app.app_context():\r\n if current_app.config[\"USE_MOCKS\"]:\r\n id -= 1 # restaurant IDs starting by 1\r\n if 0 <= id < len(restaurants):\r\n return restaurants[id]\r\n else:\r\n return None\r\n else:\r\n return get_from(current_app.config[\"REST_SERVICE_URL\"]+\"/restaurants/\"+str(id))", "def __repr__(self):\n return f'<Restaurant id: {self.id}>'", "def restaurantMenuItemNew(restaurant_id):\n try:\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n if request.method == 'POST':\n if request.form['name']:\n newItem = MenuItem(name=request.form['name'], description=request.form[\n 'description'], price=request.form['price'], course=request.form['course'], restaurant_id=restaurant_id)\n session.add(newItem)\n session.commit()\n\n flash('Menu Item Created', 'menu')\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))\n else:\n return render_template('menuItemNew.html', restaurant=restaurant)\n\n except exc.NoResultFound:\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))", "def get(self, request, *args, **kwargs):\n self.object = RUT.objects.get(pk=kwargs['pk'])\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n ot_linea_form = OT_LineaFormSet(instance=self.object)\n return self.render_to_response(\n self.get_context_data(form=form,\n ot_linea_form=ot_linea_form))", "def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = User.object.get(id=user_data.id)\n restaurant, created = Restaurant.objects.update_or_create(user=user, data=validated_data)\n return restaurant", "def add_new_review(restaurant_id):\n # If the user isn't logged in, send to the login 
page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n\n if request.method == 'POST':\n post = request.get_json()\n if 'username' not in login_session:\n new_review = Reviews(reviewer_name='anonymous',\n review=post.get('review'),\n stars=post.get('stars'),\n restaurant_id=restaurant_id,\n time=datetime.utcnow())\n else:\n new_review = Reviews(reviewer_name=login_session['username'],\n review=post.get('review'),\n stars=post.get('stars'),\n restaurant_id=restaurant_id,\n time=datetime.utcnow())\n session.add(new_review)\n session.commit()\n\n return redirect(url_for('restaurants_page'))", "def get_instance(self, *args, **kwargs):\n self.pizza = None\n pk = self.kwargs.get('pk', None)\n if pk:\n try:\n self.pizza = Pizza.objects.get(pk=pk)\n except ObjectDoesNotExist:\n raise Http404(\"No %(verbose_name)s found matching the query\" %\n {'verbose_name': Pizza._meta.verbose_name})", "def _update_restaurant_info(self):\n Restaurant().update(self._entity_id, self._entity_info)", "def restaurants_edit(restaurant_id):\n # If the user isn't logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n # Find the restaurant\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n if request.method == 'POST':\n # Only edit if the entry was re-written\n if len(request.form['address']) > 0:\n restaurant.address = request.form['address']\n if len(request.form['phone']) > 0:\n restaurant.phone = request.form['phone']\n if len(request.form['web']) > 0:\n restaurant.web = helper.check_restaurant_URL(request.form['web'])\n if len(request.form['tag_line']) > 0:\n tag_line = request.form['tag_line']\n tag_list = tag_line.split(',')\n helper.delete_restaurant_tag_pairs(restaurant.id)\n for tag in tag_list:\n helper.add_tag_if_not_exists(tag, restaurant.id)\n if len(request.form['description']) > 0:\n restaurant.description = request.form['description']\n\n restaurant.last_update = datetime.utcnow()\n\n session.add(restaurant)\n session.commit()\n flash(\"Restaurant {} edited!\".format(restaurant.name))\n return redirect(url_for('restaurants_page'))\n else:\n # Get user info if the user is signed in to render edit form\n user_info = helper.get_user_if_exists(login_session)\n tag_rest_list = session.query(RestaurantTags).filter_by(restaurant_id=restaurant.id).all()\n tag_line = ''\n # Create a tag line - by compiling the string tag_name for each tag\n for pair in tag_rest_list:\n tag = session.query(Tags).filter_by(id=pair.tag_id).first()\n tag_line += tag.tag_name + ', '\n return render_template('editrestaurant.html',\n restaurant=restaurant,\n tag_line=tag_line,\n user_info=user_info)", "def test_get_restaurant_by_id(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Greco'\n db.session.add(Restaurant(name=name, creator='[email protected]'))\n db.session.commit()\n\n # Since this is a freshly created table, the first id should be 1\n resp = self.test_client.get(self.API_BASE + '/1', headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['restaurant']['name'], name)", "def get(self, request, *args, **kwargs):\n\n self.get_instance(args, kwargs)\n self.pizza_form = PizzaForm(instance = self.pizza)\n self.topping_usage_formset = ToppingUsageFormSet(instance=self.pizza)\n return self.render_to_response(self.get_context_data())", "def menu_item_new(restaurant_id):\n # If the user isn't 
logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n img_id = 0\n if request.method == 'POST':\n if 'file' in request.files:\n print(\"File found\")\n img_id = helper.create_new_image_if_not_exists(file=request.files['file'],\n title=request.form['img_name'])\n new_item = MenuItem(name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'],\n course=request.form['course'],\n likes=0,\n dislikes=0,\n restaurant_id=restaurant_id,\n user_id=login_session['user_id'],\n image_id=img_id)\n session.add(new_item)\n session.commit()\n flash(\"New Menu Item {} created!\".format(new_item.name))\n return redirect(url_for('restaurant_menu', restaurant_id=restaurant_id))\n else:\n user_info = helper.get_user_if_exists(login_session)\n return render_template('newmenuitem.html', restaurant=restaurant, user_info=user_info)", "def collect_data(self, data: Restaurant) -> Restaurant:\n return data", "def food_item(request, food_id):\n\n food = get_object_or_404(Nutrition, pk=food_id)\n\n context = {\n 'food': food,\n }\n\n return render(request, 'nutrition/food.html', context)", "def newMenuItem(restaurant_id):\n\n if 'access_token' not in flask_session:\n return logInRedirect()\n restaurant = session.query(Restaurant).filter_by(id = restaurant_id).first()\n user_id = getUserId(flask_session['email'],flask_session['google_plus_id'])\n if not restaurant.user_id == user_id:\n flash(\"Only restaurant owners can add new items.\")\n return redirect(url_for(\"publicMenu\",restaurant_id = restaurant_id))\n\n if request.method == \"POST\":\n new_name = request.form['new_name']\n print \"\\nnewMenuItem POST triggered, name is: \", new_name\n newMenuItem = MenuItem( name=new_name,\n restaurant_id=restaurant.id )\n session.add(newMenuItem)\n session.commit()\n flash( \"new item '\" + new_name + \"' created!\")\n print \"POST worked!\"\n return redirect(url_for(\"showMenu\", restaurant_id=restaurant.id))\n\n else:\n return render_template('newMenuItem.html', restaurant = restaurant)", "def edit_restaurant(restaurant_id):\n user_id = login_session['user_id']\n r = read_restaurants(restaurant_id, user_id)\n if r[1] is True: # Means if user is owner\n if request.method == 'GET':\n return render_template('restaurants/editrestaurant.html',\n restaurant=r[0][0])\n elif request.method == 'POST':\n # Got post request -> First we get the request arguemnts\n name = request.form['name']\n address = request.form['address']\n city = request.form['city']\n state = request.form['state']\n zipCode = request.form['zipCode']\n # Next we do the db edit\n update_restaurant(restaurant_id, name, address,\n city, state, zipCode)\n # Finally we return the success html\n flash(\"Edited your restaurant\")\n return render_template(\"submitted.html\")\n else:\n return \"Invalid http\"\n else:\n flash(\"You need to be the owner of the restaurant to edit\")\n return redirect(url_for('site.show_restaurants',\n restaurant_id=restaurant_id))", "def retrieve(self, request, pk=None):\n\n\n \n\n\n try:\n # `pk` is a parameter to this function, and\n # Django parses it from the URL route parameter\n # http://localhost:8000/Posts/2\n #\n # The `2` at the end of the route becomes `pk`\n post = Post.objects.get(pk=pk)\n reactions = Reaction.objects.all()\n\n # Creates an empty list for reactions custom property set in model, and then filters through postReactions to provide objects 
with a\n # key/value pair of reaction label/number of that reaction the post has \n\n post.reactions=[]\n\n for reaction in reactions:\n number_of_reactions = PostReaction.objects.filter(post=post, reaction=reaction).count()\n post.reactions.append({reaction.label: number_of_reactions})\n\n associated_tags=Tag.objects.filter(related_post__post=post)\n user = RareUser.objects.get(user=request.auth.user)\n\n all_tags=serializer=TagSerializer(associated_tags, many=True, context={'request',request})\n my_post=serializer = PostSerializer(post, context={'request': request})\n \n single_post={}\n single_post['post']=my_post.data\n single_post['tags']=all_tags.data\n if user == post.user:\n single_post['myPosts']=True \n\n return Response(single_post)\n except Exception as ex:\n return HttpResponseServerError(ex)", "def test_get_restaurant_by_id_none(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Greco'\n db.session.add(Restaurant(name=name, creator='[email protected]'))\n db.session.commit()\n\n # Since this is a freshly created table, the only id should be 1.\n # id 2 does not exist.\n resp = self.test_client.get(self.API_BASE + '/2', headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 404)", "def register_restaurant(self, id, location, meals_list):\r\n r = Restaurant(id)\r\n r.set_location(location)\r\n r.set_meals_offered_list(meals_list)\r\n self._restaurants_list.append(r)", "def newRestaurantPage():\n if 'username' not in login_session:\n return redirect('/login')\n if request.method == 'POST':\n res_name = request.form['res_name']\n user_id = login_session['user_id']\n if res_name:\n db_methods.addNewRestaurant(res_name, user_id)\n time.sleep(0.1)\n return redirect(\"/restaurants\")\n else:\n error = \"You need to enter the name of the restaurant you want to add.\"\n return render_template('newrestaurant.html', error = error)\n else:\n return render_template('newrestaurant.html')", "def get(self, request, *args, **kwargs):\n self.object = Presupuesto.objects.get(pk=kwargs['pk'])\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n instrumento_linea_form = Instrumento_LineaFormSet(instance=self.object)\n return self.render_to_response(\n self.get_context_data(form=form,\n instrumento_linea_form=instrumento_linea_form))", "def get(self, request, *args, **kwargs):\n self.object = OT.objects.get(pk=kwargs['pk'])\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n factura_form = Factura_LineaFormSet(instance=self.object)\n remito_form = Remito_LineaFormSet(instance=self.object)\n ot_linea_form = OT_LineaFormSet(instance=self.object)\n return self.render_to_response(\n self.get_context_data(form=form,\n factura_form=factura_form,\n remito_form=remito_form,\n ot_linea_form=ot_linea_form))", "def get(self, request, *args, **kwargs):\n self.object = SI.objects.get(pk=kwargs['pk'])\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n tarea_linea_form = Tarea_LineaFormSet(instance=self.object)\n return self.render_to_response(\n self.get_context_data(form=form,\n tarea_linea_form=tarea_linea_form))", "def get_recipe(self, _id):\n raise NotImplementedError()", "def Restaurant_get_info() -> Restaurant:\r\n name = input(\"Please enter the restaurant's name: \")\r\n cuisine = input(\"Please enter the kind of food served: \")\r\n phone = input(\"Please enter the phone number: \")\r\n menu = menu_enter()\r\n return Restaurant(name, cuisine, phone, menu)", "def get(self, request, 
*args, **kwargs):\n self.object = SOT.objects.get(pk=kwargs['pk'])\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n ot_linea_form = OT_LineaFormSet(instance=self.object)\n return self.render_to_response(\n self.get_context_data(form=form,\n ot_linea_form=ot_linea_form))", "def review_add(request):\n result = {}\n\n u = request.user\n\n p = Product.objects.get_by_sku(request.POST['sku'])\n\n if p is None:\n result[\"result\"] = '0'\n elif TransactionLineItem.objects.filter(transaction__party=u, product=p).count() > 0:\n # need to check if I bought this item\n\n r, created = Review.objects.get_or_create(reviewer=u, product=p)\n r.content =request.POST['content']\n r.rating=int(request.POST['rating'])\n\n # reply to review request\n rto = request.POST.get('reply_to', None)\n if rto:\n rev_request = ReviewRequest.objects.get(id=int(rto))\n r.reply_to.add(rev_request)\n # change wish item review status to review=2\n for w in Wishlist.objects.filter(product=p, party=rev_request.requester):\n w.review = Wishlist.REVIEW_RESPONDED\n w.save()\n \n r.public = bool(request.POST['public'])\n r.save() \n\n # add a feed\n f = Feed(actor=u, action=Feed.REVIEWED, product=p) \n f.save()\n \n result[\"result\"] = str(r.id)\n else:\n result['result'] = '-1'\n\n return JSONHttpResponse(result)" ]
[ "0.6746951", "0.6127299", "0.59782755", "0.59246576", "0.5785238", "0.5777184", "0.577502", "0.5749527", "0.5731349", "0.56908524", "0.5684393", "0.56052846", "0.5584952", "0.5548897", "0.54310036", "0.5422598", "0.54060316", "0.5397571", "0.5391655", "0.53670406", "0.5354484", "0.5351371", "0.5342842", "0.5334125", "0.53063476", "0.5298668", "0.528275", "0.52643746", "0.523739", "0.523398" ]
0.82582694
0
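The two records above pair the same query with class-based-view get() overrides that resolve a Restaurant from the URL keyword argument before delegating to the parent class. The sketch below combines the two variants (plain delegation plus optional related querysets); the RestaurantDetailView class, the app module path, and the Restaurant/Dish models are assumptions for illustration only.

from django.shortcuts import get_object_or_404
from django.views.generic import TemplateView

from myapp.models import Dish, Restaurant  # hypothetical app and models

class RestaurantDetailView(TemplateView):
    template_name = "restaurants/detail.html"

    def get(self, request, restaurant_id, *args, **kwargs):
        # Resolve the Restaurant (404 if missing) and stash it on the view,
        # as both positive documents do, then fall through to super().
        self.restaurant = get_object_or_404(Restaurant, id=restaurant_id)
        # Related rows become an empty list rather than raising when nothing
        # matches, mirroring the get_list_or_none helper in the longer variant.
        self.dishes = list(Dish.objects.filter(restaurant=self.restaurant))
        return super().get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["restaurant"] = self.restaurant
        context["dishes"] = self.dishes
        return context

A matching URL pattern would supply the keyword, e.g. path("restaurants/<int:restaurant_id>/", RestaurantDetailView.as_view()).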
wget twitter URL and parse out URLs
def get_trend_urls(): trends = [] twitter = 'http://search.twitter.com/' tmp = 'tmp' + str(random.randint(0,1000)) os.system('wget %s --output-document=%s' % (twitter, tmp)) with open(tmp) as f: for line in f: if 'a href' in line and 'search?q' in line: trends.append(twitter + line.split('a href=\"/')[1].split('\"')[0]) os.system('rm %s' % tmp) return trends
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_url():\n urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',\n new_tweet)\n return urls", "def get_links_from_tweet(tweet):\n if tweet.has_key('entities'):\n if tweet['entities'].has_key('urls'):\n if tweet['entities']['urls']:\n return [t['expanded_url'] for t in tweet['entities']['urls']]\n\n return None", "def getTwitterscraperTweets():\n import subprocess\n numOfAuthors = len(authors)\n numOfWords = len(words)\n callVars = ['./recoverTweets.sh',str(numOfWords),str(numOfAuthors)]\n callVars.extend([word for word in words]+[author for author in authors])\n if startingDate:\n callVars.extend(['-sd',startingDate])\n if endingDate:\n callVars.extend(['-ed',endingDate])\n #if maxTweets:\n # callVars.extend(['-max',str(maxTweets)])\n callVars.append(\"data/twitterscrapertmp\")\n print(\"Querying twitterAPI by using TwitterScraper... (it may take a long time)\")\n subprocess.call(callVars)\n with open('data/twitterscrapertmp') as json_data:\n tweets = json.load(json_data)\n if removeRetweets:\n tweets = [tweet for tweet in tweets if not isRetweet(tweet)]\n print(\"Query ended. Retrieved: \",len(tweets),\" tweets\")\n #saveTweets(tweets,outputCollection,onFile=True,onDb=True)\n os.remove('data/twitterscrapertmp')\n return tweets", "def extract_url_download(update: Update, context: CallbackContext) -> None:\r\n received_text = update.message.text\r\n yt_urls = get_link_text(received_text)\r\n yt_urls_msg = update.message.reply_text(pretty_url_string(yt_urls), disable_web_page_preview=True)\r\n if len(yt_urls) > 0:\r\n for url in yt_urls:\r\n if 'list=' in url:\r\n print(\"dshgshj\")\r\n\t\t\t\t# download_playlist_url(update, context, url)\r\n else:\r\n download_url(update, context, url)\r\n context.bot.delete_message(message_id=yt_urls_msg.message_id, chat_id=yt_urls_msg.chat_id)", "def find_urls(url):\n try:\n #sock = urllib2.urlopen(url)\n result = urlfetch.fetch(url)\n sock = result.content\n parser = URLParser()\n #print sock.read()\n parser.feed(sock.read())\n sock.close()\n parser.close()\n return parser.urls\n except: # This is to take care of links that are not valid.\n return []", "def parse_tweet(line):\n # The following regex just strips of an URL (not just http), any punctuations,\n # or Any non alphanumeric characters\n # http://goo.gl/J8ZxDT\n text = re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\",\" \",json.loads(line[1])[\"value\"]).strip()\n # remove terms <= 2 characters\n text = ' '.join(filter(lambda x: len(x) > 2, text.split(\" \")))\n\n return (line[0], text)", "def construct_url(screen_name):\n number_of_tweets = \"200\"\n urls = []\n for x in xrange(1, 6):\n urls.append('https://api.twitter.com/1.1/statuses/user_timeline.json?'\n 'screen_name=' + screen_name + '&count=' +\n number_of_tweets + '&page=' + str(x))\n return urls", "def test_twitter_shortlink(self):\n\n test = Unfurl(remote_lookups=True)\n test.add_to_queue(data_type='url', key=None, value='https://t.co/g6VWYYwY12')\n test.parse_queue()\n\n # test number of nodes\n self.assertEqual(len(test.nodes.keys()), 18)\n self.assertEqual(test.total_nodes, 18)\n\n self.assertEqual(test.nodes[4].value, '/g6VWYYwY12')\n self.assertEqual(test.nodes[11].value, 'github.com')\n self.assertEqual(test.nodes[16].label, '1: obsidianforensics')\n\n # is processing finished empty\n self.assertTrue(test.queue.empty())\n self.assertEqual(len(test.edges), 0)", "def get_most_unrolled_urls(tweet):\n unrolled_urls = []\n for url in tweet.tweet_links:\n if 
\"unwound\" in url:\n unrolled_urls.append(url[\"unwound\"][\"url\"])\n elif \"expanded_url\" in url:\n unrolled_urls.append(url[\"expanded_url\"])\n else:\n unrolled_urls.append(url[\"url\"])\n return unrolled_urls", "def twitterCrawl(self, strInput):\n strVal = self.txtTwitter.get(\"1.0\", 'end')\n if (strVal.strip()):\n self.txtTwitter.delete(\"1.0\", 'end')\n twitResult = twit.search(strInput)\n return twitResult", "def gather_headlines(urls):\n pass", "def remove_url(txt):\n print(txt['fields']['tweet'])\n return \" \".join(re.sub(\"([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \"\", txt['fields']['tweet']).split())", "def clean_urls(self, tweet):\n self.urls = re.findall(self.regexpForURLs, tweet)\n\n for url in self.urls:\n tweet = tweet.replace(url, '')\n\n tweet = self.clean_unnecessary_whitespaces(tweet)\n return tweet", "def tweet_url(username, id):\n return 'http://twitter.com/%s/status/%d' % (username, id)", "def get_tweets(twitter, screen_name, num_tweets):\n\n request = robust_request(twitter, 'search/tweets', {'q': screen_name, 'count': num_tweets})\n tweets = [a['text'] for a in request]\n\n return tweets", "def getURLs():", "def remove_urls(lista_tweets):\n\n novos_tweets = []\n\n for tweet in lista_tweets:\n texto = re.sub(r\"http\\S+\", \"\", tweet[\"text\"])\n novos_tweets.append(texto)\n\n return novos_tweets", "def getUrls(url):\n f = requests.get(url)\n p = MyParser()\n p.feed(f.text)\n list_of_urls = p.output_list\n #deal with possible strange None values\n list_of_urls = [url for url in list_of_urls if url is not None]\n for url in list_of_urls:\n if 'http' not in url: list_of_urls.remove(url)\n return list_of_urls", "def topictweets(url):\n article = get_article(url)\n keywords = get_keywords(article['text'])\n entities = get_entities(article['text'])\n q = twitter_query(keywords, entities)\n result = search({'q': q, 'count': 100, 'result_type': 'mixed'})\n tweets = screen_name_filter(result.statuses, 'media')\n return tweets", "def get_tweets(query, pages=25):\n\n logger = Logger()\n after_part = 'include_available_features=1&include_entities=1&include_new_items_bar=true'\n if query.startswith('#'):\n query = quote(query)\n url = 'https://twitter.com/i/search/timeline?f=tweets&vertical=default&q={}&src=tyah&reset_error_state=false&'.format(query)\n else:\n url = 'https://twitter.com/i/profiles/show/{}/timeline/tweets?'.format(query)\n url += after_part\n \n headers = {\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Referer': 'https://twitter.com/{}'.format(query),\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',\n 'X-Twitter-Active-User': 'yes',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Accept-Language': 'en-US'\n }\n\n def gen_tweets(pages):\n logger.add(\"MSG: Sending request to url '{}'...\".format(url))\n r = session.get(url, headers=headers)\n\n logger.add(\"MSG: Parsing result...\".format(url))\n while pages > 0:\n try:\n html = BeautifulSoup(r.json()['items_html'], parser='html', features=\"lxml\")\n except KeyError:\n raise ValueError(\n 'Oops! 
Either \"{}\" does not exist or is private.'.format(query))\n\n comma = \",\"\n dot = \".\"\n tweets = []\n for tweet in html.select('.stream-item'):\n # 10~11 html elements have `.stream-item` class and also their `data-item-type` is `tweet`\n # but their content doesn't look like a tweet's content\n try:\n text = tweet.select('.tweet-text')[0].get_text()\n except IndexError: # issue #50\n continue\n\n tweet_id = tweet['data-item-id']\n\n time = datetime.fromtimestamp(int(tweet.select('._timestamp')[0]['data-time-ms']) / 1000.0)\n\n interactions = [\n x.get_text()\n for x in tweet.select('.ProfileTweet-actionCount')\n ]\n\n replies = int(\n interactions[0].split(' ')[0].replace(comma, '').replace(dot, '')\n or interactions[3]\n )\n\n retweets = int(\n interactions[1].split(' ')[0].replace(comma, '').replace(dot, '')\n or interactions[4]\n or interactions[5]\n )\n\n likes = int(\n interactions[2].split(' ')[0].replace(comma, '').replace(dot, '')\n or interactions[6]\n or interactions[7]\n )\n\n hashtags = [\n hashtag_node.get_text()\n for hashtag_node in tweet.select('.twitter-hashtag')\n ]\n urls = [\n url_node['data-expanded-url']\n for url_node in tweet.select('a.twitter-timeline-link:not(.u-hidden)')\n ]\n photos = [\n photo_node['data-image-url']\n for photo_node in tweet.select('.AdaptiveMedia-photoContainer')\n ]\n\n is_retweet = False\n if tweet.select('.js-stream-tweet')[0].has_attr('data-retweet-id'):\n is_retweet = True\n\n is_pinned = False\n if tweet.select(\".pinned\"):\n is_pinned = True\n\n videos = []\n video_nodes = tweet.select(\".PlayableMedia-player\")\n for node in video_nodes:\n styles = node['style'].split()\n for style in styles:\n if style.startswith('background'):\n tmp = style.split('/')[-1]\n video_id = tmp[:tmp.index('.jpg')]\n videos.append({'id': video_id})\n\n tweets.append({\n 'tweetId': tweet_id,\n 'isRetweet': is_retweet,\n 'time': time,\n 'text': text,\n 'replies': replies,\n 'retweets': retweets,\n 'likes': likes,\n 'isPinned': is_pinned,\n 'entries': {\n 'hashtags': hashtags, 'urls': urls,\n 'photos': photos, 'videos': videos\n }\n })\n\n\n last_tweet = html.select('.stream-item')[-1]['data-item-id']\n\n for tweet in tweets:\n if tweet:\n tweet['text'] = re.sub(r'\\Shttp', ' http', tweet['text'], 1)\n tweet['text'] = re.sub(r'\\Spic\\.twitter', ' pic.twitter', tweet['text'], 1)\n yield tweet\n\n r = session.get(url, params={'max_position': last_tweet}, headers=headers)\n pages += -1\n yield from gen_tweets(pages)", "def separate_hastags_mentions_urls(tweet):\n \n text = tweet.lower()\n hashtag_list = re.findall(\"#([a-zA-Z0-9_]{1,50})\", text)\n \n text = re.sub(r'http\\S+', '', text)\n clean_tweet = re.sub(\"@[A-Za-z0-9_]+\",\"\", text)\n clean_tweet = re.sub(\"#[A-Za-z0-9_]+\",\"\", clean_tweet)\n \n return clean_tweet, hashtag_list", "def text_to_tweets(text, url):\n max_chars = 240 - 1 - 23 # one removed for punctuation 22 removed for link.\n tweets = [] # buffer of tweets to send\n tweet = \"\" # the current tweet we are composing\n while len(text) > 0: # while we still have text ...\n try:\n while len(tweet) + len(text[0]) + 1 < max_chars:\n # as long as the composed tweet is one less than the character limit\n phrase = text.pop(0)\n if phrase not in [\"? \", \". \", \"! \"]: # If the next piece of text is not punctuation ...\n tweet += \" \" # ... Add a space\n tweet += phrase # and add the text\n else:\n tweet += phrase[0]\n\n # if the net character is a punctuation mark\n if text[0] in [\"? \", \". 
\"]: # if the next char is a punctuation mark\n\n tweet += text.pop(0)[0] # add it to the end of the tweet\n else:\n tweet += u'…' # otherwise '...'\n except IndexError:\n print(\"INDEX ERROR\") # ... something went wrong ...\n\n if len(tweets) == 0 and url is not None:\n # If there are presently no tweets we need to add the blog link to the post\n # This tells someone where to see your posts.\n max_chars = 240 - 1 # we can now use more characters.\n tweet += \" \" + url #\n\n tweets.append(tweet)\n tweet = \"\"\n\n return tweets", "def findLink(status):\n link = False\n try:\n match2 = re.findall(r'bit.ly[\\w./:0-9]*', status)\n if match2:\n link = match2[0]\n #Find full urls\n match = re.findall(r'http[\\w./:0-9]*', status)\n if match:\n link = match[0]\n resp = urllib.urlopen(link)\n if resp.url:\n link = resp.url\n else:\n link = False\n except:\n link = False\n return link", "def getwithinfo(url):\n attemptcount = 0\n max_attempts = 10\n retry_delay = 10\n request_delay = 0.5#avoid hammering the site too hard\n assert_is_string(url)\n deescaped_url = deescape(url)\n url_with_protocol = add_http(deescaped_url)\n # Remove all ssl because ATC said to\n # http://stackoverflow.com/questions/19268548/python-ignore-certicate-validation-urllib2\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n while attemptcount < max_attempts:\n attemptcount = attemptcount + 1\n if attemptcount > 1:\n delay(retry_delay)\n logging.debug( \"Attempt \"+repr(attemptcount)+\" for URL: \"+repr(url) )\n try:\n## save_file(\n## file_path = os.path.join(\"debug\",\"get_last_url.txt\"),\n## data = url,\n## force_save = True,\n## allow_fail = True\n## )\n delay(request_delay)\n request = urllib2.Request(url_with_protocol)\n request.add_header(\"User-agent\", \"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1\")\n #request.add_header('Referer', 'http://www.tumblr.com/')\n\n r = urllib2.urlopen(\n request,\n context=ctx\n )\n info = r.info()\n reply = r.read()\n## # Save html responses for debugging\n## if \"html\" in info[\"content-type\"]:\n## save_file(\n## file_path = os.path.join(\"debug\",\"get_last_html.htm\"),\n## data = reply,\n## force_save = True,\n## allow_fail = True\n## )\n## else:\n## pass\n## save_file(\n## file_path = os.path.join(\"debug\",\"get_last_not_html.txt\"),\n## data = reply,\n## force_save = True,\n## allow_fail = True\n## )\n # Retry if empty response and not last attempt\n if (len(reply) < 1) and (attemptcount < max_attempts):\n logging.error(\"Reply too short :\"+repr(reply))\n continue\n\n return reply,info,r\n except urllib2.HTTPError, err:\n logging.exception(err)\n logging.error(repr(err))\n if err.code == 404:\n logging.error(\"404 error! \"+repr(url))\n return\n elif err.code == 403:\n logging.error(\"403 error, ACCESS DENIED! 
url: \"+repr(url))\n return\n elif err.code == 410:\n logging.error(\"410 error, GONE\")\n return\n else:\n save_file(\n file_path = os.path.join(\"debug\",\"HTTPError.htm\"),\n data = err.fp.read(),\n force_save = True,\n allow_fail = True\n )\n continue\n\n except urllib2.URLError, err:\n logging.exception(err)\n logging.error(repr(err))\n if \"unknown url type:\" in err.reason:\n return\n else:\n continue\n\n except httplib.BadStatusLine, err:\n logging.exception(err)\n logging.error(repr(err))\n continue\n\n except httplib.IncompleteRead, err:\n logging.exception(err)\n logging.error(repr(err))\n logging.exception(err)\n continue\n\n except socket.timeout, err:\n logging.exception(err)\n logging.error(repr( type(err) ) )\n logging.error(repr(err))\n continue\n\n except Exception, err:\n logging.exception(err)\n # We have to do this because socket.py just uses \"raise\"\n logging.error(\"getwithinfo() caught an exception\")\n logging.error(\"getwithinfo() repr(err):\"+repr(err))\n logging.error(\"getwithinfo() str(err):\"+str(err))\n logging.error(\"getwithinfo() type(err):\"+repr(type(err)))\n continue\n\n logging.error(\"Too many retries, failing.\")\n return", "def extract_tweets(consumer_key,consumer_secret,access_token,access_token_secret,search_key):\n # Step 1 - Authenticate\n consumer_key= str(consumer_key)\n consumer_secret= str(consumer_secret)\n\n access_token=str(access_token)\n access_token_secret=str(access_token_secret)\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n\n api = tweepy.API(auth)\n\n #Step 3 - Retrieve Tweets\n public_tweets = api.search(search_key)\n tweets_list=[]\n for tweet in public_tweets:\n tweets_list.append(tweet.text)\n return tweets_list", "def parse_url(url):\n # This is a dirty hack; but it's the only work around to tgram://\n # messages since the bot_token has a colon in it. It invalidates a\n # normal URL.\n\n # This hack searches for this bogus URL and corrects it so we can\n # properly load it further down. The other alternative is to ask users\n # to actually change the colon into a slash (which will work too), but\n # it's more likely to cause confusion... 
So this is the next best thing\n # we also check for %3A (incase the URL is encoded) as %3A == :\n try:\n tgram = re.match(\n r'(?P<protocol>{schema}://)(bot)?(?P<prefix>([a-z0-9_-]+)'\n r'(:[a-z0-9_-]+)?@)?(?P<btoken_a>[0-9]+)(:|%3A)+'\n r'(?P<remaining>.*)$'.format(\n schema=NotifyTelegram.secure_protocol), url, re.I)\n\n except (TypeError, AttributeError):\n # url is bad; force tgram to be None\n tgram = None\n\n if not tgram:\n # Content is simply not parseable\n return None\n\n if tgram.group('prefix'):\n # Try again\n results = NotifyBase.parse_url('%s%s%s/%s' % (\n tgram.group('protocol'),\n tgram.group('prefix'),\n tgram.group('btoken_a'),\n tgram.group('remaining')), verify_host=False)\n\n else:\n # Try again\n results = NotifyBase.parse_url('%s%s/%s' % (\n tgram.group('protocol'),\n tgram.group('btoken_a'),\n tgram.group('remaining')), verify_host=False)\n\n # The first token is stored in the hostname\n bot_token_a = NotifyTelegram.unquote(results['host'])\n\n # Get a nice unquoted list of path entries\n entries = NotifyTelegram.split_path(results['fullpath'])\n\n # Now fetch the remaining tokens\n bot_token_b = entries.pop(0)\n\n bot_token = '%s:%s' % (bot_token_a, bot_token_b)\n\n # Store our chat ids (as these are the remaining entries)\n results['targets'] = entries\n\n # content to be displayed 'before' or 'after' attachments\n if 'content' in results['qsd'] and len(results['qsd']['content']):\n results['content'] = results['qsd']['content']\n\n # Support the 'to' variable so that we can support rooms this way too\n # The 'to' makes it easier to use yaml configuration\n if 'to' in results['qsd'] and len(results['qsd']['to']):\n results['targets'] += \\\n NotifyTelegram.parse_list(results['qsd']['to'])\n\n # Store our bot token\n results['bot_token'] = bot_token\n\n # Support Thread Topic\n if 'topic' in results['qsd'] and len(results['qsd']['topic']):\n results['topic'] = results['qsd']['topic']\n\n # Silent (Sends the message Silently); users will receive\n # notification with no sound.\n results['silent'] = \\\n parse_bool(results['qsd'].get('silent', False))\n\n # Show Web Page Preview\n results['preview'] = \\\n parse_bool(results['qsd'].get('preview', False))\n\n # Include images with our message\n results['include_image'] = \\\n parse_bool(results['qsd'].get('image', False))\n\n # Include images with our message\n results['detect_owner'] = \\\n parse_bool(results['qsd'].get('detect', True))\n\n return results", "def scan_links_from_url(url):\n\n\t#Get the url\n\thtml_io = StringIO.StringIO()\n\n\tcurl = pycurl.Curl()\n\tcurl.setopt(pycurl.URL, str(url))\n\tcurl.setopt(pycurl.WRITEFUNCTION, html_io.write)\n\tcurl.perform()\n\n\thtml = html_io.getvalue()\n\n\thtml_io.close()\n\tcurl.close()\n\n\t#Apply the regex expression and fetch all links from source\n\tregexp = re.compile(\"\"\"http\\:\\/\\/rapidshare\\.(?:com|de)\\/files\\/[\\d]*\\/.*?\\..*?[^\"\\s\\<\\>]*[^.,;'\">\\:\\s\\<\\>\\)\\]\\!]\"\"\")\n\n\treturn regexp.findall(html)", "def getindex(url):\n try:\n req = urllib2.Request(url)\n f = urllib2.urlopen(req)\n return [ line.strip().split(':') for line in f.readlines() ] \n except:\n print \"Does the URL exist?\"", "def get_and_download(api, path, num_tweets=25, profile=\"@hakeemangulu\", admin=False):\n return downloader(get_media(api, num_tweets, profile, admin), path)", "def get_tweets(username, amount):\n tweets = []\n twitter = Twython()\n\n finished = False\n page = 1\n while not finished:\n\n if amount <= 200:\n # Make the API call.\n search_results = 
twitter.getUserTimeline(screen_name=username,\n page=str(page), count=str(amount))\n finished = True\n\n else:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count='200')\n amount -= 200\n page += 1\n\n if isinstance(search_results, dict) and search_results['error']:\n raise TwitterAPIException(str(search_results['error']))\n elif not search_results:\n raise TwitterAPIException('User has no tweets.')\n\n for result in search_results:\n tweets.append(result['text']) \n\n return tweets" ]
[ "0.68655646", "0.66298056", "0.62968886", "0.6094173", "0.5966897", "0.58850116", "0.5880054", "0.58753675", "0.58562684", "0.58468866", "0.58187366", "0.581417", "0.57802767", "0.57750183", "0.5772893", "0.5772719", "0.5762832", "0.5744211", "0.57211214", "0.571953", "0.56962967", "0.56922233", "0.56681454", "0.5648881", "0.564646", "0.56110567", "0.5576608", "0.55713415", "0.5531147", "0.55196786" ]
0.7178669
0
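The positive document in the record above downloads the Twitter trends page by shelling out to wget, writing the HTML to a temporary file, and scanning it for search links. A minimal in-process equivalent is sketched below using urllib in place of wget: it avoids the temporary file and the os.system call (and with it the shell-quoting risk of interpolating a URL into a command line). The legacy search.twitter.com markup that both versions rely on is an assumption and may no longer be served.

import re
import urllib.request

def extract_links(page_url, link_pattern=r'href="([^"]+)"'):
    # Fetch the page and pull every href attribute out of the raw HTML.
    with urllib.request.urlopen(page_url) as resp:
        html = resp.read().decode("utf-8", errors="replace")
    return re.findall(link_pattern, html)

if __name__ == "__main__":
    for link in extract_links("http://search.twitter.com/"):
        if "search?q" in link:
            print(link)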
Given the object geometry generate a 3D plot using Geometry3D library Renderer instance
def plot_3d_object(object_): # Initialize renderer instance r = Renderer() # Add surfaces and goal regions to the renderer instance for surf in object_: r.add((object_[surf][0],'b',1)) if len(object_[surf])>2: r.add((object_[surf][2],'r',1)) r.add((gPoint(-15,-15,-15),'k',1)) r.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
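The document in this record drives the Geometry3D Renderer directly: create a Renderer, add() each geometry as an (object, colour, size) tuple, and call show(). A small standalone sketch of that usage follows; the import line and the sample coordinates are assumptions (the snippet above only shows that Renderer and a Point alias named gPoint are in scope).

from Geometry3D import Point, Renderer  # assumed top-level exports of the Geometry3D package

def plot_points(points):
    # points: iterable of (x, y, z) tuples rendered as black markers.
    r = Renderer()
    for x, y, z in points:
        r.add((Point(x, y, z), "k", 2))
    r.show()

plot_points([(0, 0, 0), (1, 2, 3), (-15, -15, -15)])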
[ "def plot3d(self):\n plot_rupture_wire3d(self)", "def plotTerrain3d(self, gdf: gpd.GeoDataFrame, fig_size: tuple=(12, 10), size: float=0.01):\n fig, ax = plt.subplots(1, 1, figsize=fig_size)\n ax = plt.axes(projection='3d')\n ax.scatter(gdf.geometry.x, gdf.geometry.y, gdf.elevation, s=size)\n plt.show()", "def render_3d(projection, **kwds):\n if isinstance(projection, Polyhedron): projection = Projection(projection)\n return \\\n projection.render_vertices_3d(width=3, color='green', **kwds) +\\\n projection.render_wireframe_3d(width=3, color='green', **kwds) + \\\n projection.render_solid_3d(**kwds)", "def plot3d(data_x, data_y, data_z, vol):\n fig = go.Figure(\n data = [\n go.Mesh3d(\n x = data_x,\n y = data_y,\n z = data_z,\n i = [7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2], # These are needed, numbers from documentation\n j = [3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3],\n k = [0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6],\n colorscale=[[0, 'darkblue'],\n [0.5, 'lightskyblue'],\n [1, 'darkblue']],\n intensity = np.linspace(0, 1, 8, endpoint=True),\n showscale=False,\n opacity = 0.6\n )\n ],\n layout = go.Layout(\n title = \"Le volume est: \" + str(vol),\n autosize = True\n )\n )\n\n # This prints it\n pyo.iplot(fig, filename='Determinant-Volume')", "def visualize_in_3d(self,**kwargs):\n fig = plt.figure(figsize=(7,7))\n ax = fig.add_subplot(111, projection='3d')\n\n points = np.vstack([\n c.to_matrix() for c in self.contours if c.inclusion\n ])\n points[:,:2] = points[:,:2] * self.scan.pixel_spacing\n\n # Center the points at the origin for \n # spherical coordinates conversion.\n points = points - points.mean(axis=0)\n\n # Triangulate the azimuth and zenith transformation.\n azimuth = np.arctan2(points[:,1],points[:,0])\n zenith = np.arccos(points[:,2] / np.linalg.norm(points,axis=1))\n azi_zen = np.c_[azimuth.flatten(),zenith.flatten()]\n triangles = Delaunay(azi_zen).simplices\n\n # Start the points at 0 on every axis.\n # This lets the axis ticks to be interpreted as length in mm.\n points = points - points.min(axis=0)\n\n ax.set_xlabel('length (mm)')\n ax.set_ylabel('length (mm)')\n ax.set_zlabel('length (mm)')\n\n # Plot the points.\n ax.plot_trisurf(points[:,0], points[:,1], points[:,2],\n triangles=triangles, **kwargs)\n plt.show()", "def plot_3d(self, ax_3d: Axes3D, lims_x: array_like = (-1, 1), lims_y: array_like = (-1, 1), **kwargs) -> None:\n X, Y, Z = self.to_mesh(lims_x, lims_y)\n\n ax_3d.plot_surface(X, Y, Z, **kwargs)", "def plot_3d(self, ax_3d: Axes3D, n_angles: int = 30, **kwargs) -> None:\n X, Y, Z = self.to_mesh(n_angles)\n\n ax_3d.plot_surface(X, Y, Z, **kwargs)", "def plot3D(self, diaphragmpoints=None, lungpoints=None, fig=None, ax=None, diaphragmcolor='r', lungcolor='g', size=2, howplot=0, dots=0):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.set_xlabel('X axis')\n ax.set_ylabel('Y axis')\n ax.set_zlabel('Z axis')\n\n if diaphragmpoints is not None and lungpoints is not None:\n points = diaphragmpoints + lungpoints\n elif diaphragmpoints is not None:\n points = diaphragmpoints\n elif lungpoints is not None:\n points = lungpoints\n\n xpts, ypts, zpts = list(), list(), list()\n for i in range(len(points)):\n xpts.append(points[i][0])\n ypts.append(points[i][1])\n zpts.append(points[i][2])\n\n X = np.asarray(xpts)\n Y = np.asarray(ypts)\n Z = np.asarray(zpts)\n\n if howplot == 'wireframe':\n xpts, ypts, zpts = list(), list(), list()\n for i in range(len(pts)):\n xpts.append(pts[i][0])\n ypts.append(pts[i][1])\n zpts.append(pts[i][2])\n\n X = np.asarray([xpts])\n Y 
= np.asarray([ypts])\n Z = np.asarray([zpts])\n\n if dots == 1:\n ax.scatter(X, Y, Z, s=size, c='r', marker='o')\n\n ax.plot_wireframe(X, Y, Z)\n elif howplot == 1:\n ax.scatter(X, Y, Z, s=size, c=diaphragmcolor, marker='o')\n else:\n ax.scatter(X, Y, Z, s=size, c=diaphragmcolor, marker='o')\n ax.plot_trisurf(X, Y, Z, linewidth=0.2, antialiased=True)\n\n # Create cubic bounding box to simulate equal aspect ratio\n max_range = np.array([X.max() - X.min(), Y.max() - Y.min(), Z.max() - Z.min()]).max()\n Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (X.max() + X.min())\n Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (Y.max() + Y.min())\n Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (Z.max() + Z.min())\n\n # Comment or uncomment following both lines to test the fake bounding box:\n for xb, yb, zb in zip(Xb, Yb, Zb):\n ax.plot([xb], [yb], [zb], 'w')\n\n plt.show()\n # fig.savefig('{}/diaphragm/{}.png'.format(DIR_RESULT))", "def plot_surface_3D(self, length = 30, fps = 30, **kwargs):\n fig = utils.get_figure(scale = 3)\n ax = fig.add_subplot(111, projection = '3d')\n\n # surface_x = self.xi_1_mesh\n # surface_y = self.xi_2_mesh\n # surface_x, surface_y, surface_z = self.surface()\n xyz = self.surface()\n\n # surface_x, surface_y = np.meshgrid(surface_x, surface_y)\n\n # print(np.shape(surface_x))\n # print(np.shape(surface_y))\n # print(np.shape(surface_z))\n\n control_points_x = np.array([control_point[0] for control_point in self.control_net.values()])\n control_points_y = np.array([control_point[1] for control_point in self.control_net.values()])\n control_points_z = np.array([control_point[2] for control_point in self.control_net.values()])\n\n # x_min = min(np.min(surface_x), np.min(control_points_x))\n # x_max = max(np.max(surface_x), np.max(control_points_x))\n # x_range = np.abs(x_max - x_min)\n #\n # y_min = min(np.min(surface_y), np.min(control_points_y))\n # y_max = max(np.max(surface_y), np.max(control_points_y))\n # y_range = np.abs(y_max - y_min)\n #\n # z_min = min(np.min(surface_z), np.min(control_points_z))\n # z_max = max(np.max(surface_z), np.max(control_points_z))\n # z_range = np.abs(z_max - z_min)\n #\n # ax.set_xlim(x_min - 0.05 * x_range, x_max + 0.05 * x_range)\n # ax.set_ylim(y_min - 0.05 * y_range, y_max + 0.05 * y_range)\n # ax.set_zlim(z_min - 0.05 * z_range, z_max + 0.05 * z_range)\n\n ax.scatter(control_points_x, control_points_y, control_points_z, depthshade = False, **CONTROL_POLYGON_KWARGS)\n\n # print(np.max(surface_x), np.max(surface_y), np.max(surface_z))\n # print(np.min(surface_x), np.min(surface_y), np.min(surface_z))\n # print(surface_x)\n # print(surface_y)\n # print(surface_z)\n xyz = np.reshape(xyz, (-1, 3))\n print(xyz.shape)\n x, y, z = xyz[:, 0], xyz[:, 1], xyz[:, 2]\n ax.scatter(x, y, z)\n # ax.plot_trisurf(\n # x, y, z,\n # cmap = plt.get_cmap('viridis'),\n # linewidth = 0,\n # antialiased = True,\n # )\n # ax.plot_surface(surface_x, surface_y, surface_z, rstride = 1, cstride = 1)\n # ax.plot_trisurf(surface_x, surface_y, surface_z)\n # ax.plot_trisurf(surface_x, surface_y, surface_z, **CURVE_KWARGS)\n\n ax.axis('off')\n\n ax.view_init(elev = 45, azim = 0) # note that this resets ax.dist to 10, so we can't use it below\n ax.dist = 7.5 # default is 10, so zoom in a little because there's no axis to take up the rest of the space\n\n plt.show()\n utils.save_current_figure(**kwargs)\n\n ### ANIMATION ###\n\n frames = length * fps\n\n writer = 
anim.writers['ffmpeg'](fps = fps, bitrate = 2000) # don't need a very high bitrate\n\n def animate(frame):\n print(frame, frames, frame / frames)\n ax.azim = 360 * frame / frames # one full rotation\n return [] # must return the list of artists we modified (i.e., nothing, since all we did is rotate the view)\n\n ani = anim.FuncAnimation(fig, animate, frames = frames, blit = True)\n ani.save(f\"{os.path.join(kwargs['target_dir'], kwargs['name'])}.mp4\", writer = writer)\n\n plt.close()", "def plot_surface(self):\n X, Y = np.meshgrid(self.x, self.y)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(X=X, Y=Y, Z=self.z)\n plt.show()", "def visualize_3d(grbdir,x, y, z, t, thetax, thetay, name):\n # Set ax.azim and ax.elev to ra, dec\n global runconf\n\n from mpl_toolkits.mplot3d import Axes3D\n fig = plt.figure()\n plt.suptitle(r\"Visualisation of {name} in 3d:$\\theta_x$={tx:0.1f},$\\theta_y$={ty:0.1f}\".format(name=name, tx=thetax, ty=thetay))\n # Z\n ax = plt.subplot(2, 2, 1, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = z.ra.deg\n ax.elev = z.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI pointing (z)\")\n\n # Transient\n ax = plt.subplot(2, 2, 2, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = t.ra.deg\n ax.elev = t.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from nominal \\n transient direction\")\n\n # X\n ax = plt.subplot(2, 2, 3, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = x.ra.deg\n ax.elev = x.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI X axis\")\n\n # Z\n ax = plt.subplot(2, 2, 4, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = y.ra.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI Y axis\")\n\n return", "def render_vertices_3d(self, **kwds):\n return point3d(self.coordinates_of(self.points), **kwds)", "def plot3d(self, data, axis2, axis3, mesh, data_type='solution', colormap='blue-red', axes=False,\n cartesian_coordinates=False, interp_size=None, ax_names=None, style=0, *args, **kwargs):\n # if type(axis2) is not Axis1d or type(axis3) is not Axis1d:\n # raise NotImplementedError(\"3D plots with such combination of axes are not supported.\")\n # x_grid, y_grid, z_grid = self.cartesian_coordinates(axis2, axis3)\n # Title\n if data_type == 'solution':\n title = util.text.solution_caption(cartesian_coordinates, self, axis2, axis3).replace('$', '') \\\n .replace('{', '').replace('}', '')\n elif data_type == 'detector_geometry' or data_type == 'detector_geometry_n':\n title = ' ' + re.sub('[${}]', '', util.text.detector_caption(mesh, data_type, cartesian_coordinates))\n if axes:\n if ax_names is None:\n axes = ('{}, {}'.format(self.name, self.units),\n '{}, {}'.format(axis2.name, axis2.units),\n '{}, {}'.format(axis3.name, axis3.units))\n else:\n axes = ax_names\n # Voxel style\n if style == 0:\n if cartesian_coordinates:\n vertices, faces = self.cell_edges3d_cartesian(axis2, axis3)\n else:\n vertices, faces = self.cell_edges3d(axis2, axis3)\n vertices = np.array(vertices)\n if data_type == 'solution':\n x = []\n y = []\n z = []\n new_data = []\n new_faces = []\n shift = 0\n for i, a1 in enumerate(vertices):\n for 
j, a2 in enumerate(a1):\n for k, a3 in enumerate(a2):\n vert_faces = np.array(faces[i][j][k]) + shift\n for f in vert_faces:\n new_faces.append(f)\n for vert in a3:\n x.append(vert[0])\n y.append(vert[1])\n z.append(vert[2])\n new_data.append(data[i][j][k])\n shift += 1\n plot3d.voxel_plot(new_data, x, y, z, new_faces, title=title, axes=axes, colormap=colormap,\n *args, **kwargs)\n elif data_type == 'detector_geometry' or data_type == 'detector_geometry_n':\n new_data = []\n for det in data:\n x = []\n y = []\n z = []\n det_data = []\n new_faces = []\n shift = 0\n for i, a1 in enumerate(vertices):\n for j, a2 in enumerate(a1):\n for k, a3 in enumerate(a2):\n vert_faces = np.array(faces[i][j][k]) + shift\n for f in vert_faces:\n new_faces.append(f)\n for vert in a3:\n x.append(vert[0])\n y.append(vert[1])\n z.append(vert[2])\n det_data.append(det[i][j][k])\n shift += 1\n new_data.append(det_data)\n plot3d.detector_voxel_plot(new_data, x, y, z, new_faces, title=title, axes=axes, colormap=colormap,\n *args, **kwargs)\n else:\n raise ValueError('data type {} is unknown'.format(data_type))\n return 0, 0\n\n if cartesian_coordinates:\n x_grid, y_grid, z_grid = self.cartesian_coordinates(axis2, axis3)\n else:\n coord = [self.coordinates, axis2.coordinates, axis3.coordinates]\n x_grid, y_grid, z_grid = np.array(np.meshgrid(*coord, indexing='ij'))\n\n if data_type == 'solution':\n # irregular or non-cartesian axes\n if not all((self.regular, axis2.regular, axis3.regular)) or \\\n (cartesian_coordinates and not all(type(x) == cartesian.Axis1d for x in (self, axis2, axis3))):\n if interp_size is None:\n interp_size = 50\n warnings.warn(\"Since axes are not regular, linear interpolation with {} points used. \"\n \"You can change interpolation size with interp_size attribute.\"\n .format(interp_size ** 3))\n x_grid, y_grid, z_grid, new_data = \\\n util.geometry3d_basic.make_regular(data, x_grid, y_grid, z_grid, interp_size)\n new_data = np.nan_to_num(new_data)\n new_data = np.clip(new_data, np.amin(data), np.amax(data))\n mask = mesh.is_in_grid(self.from_cartesian([x_grid, y_grid, z_grid], axis2, axis3), self, axis2, axis3)\n new_data *= mask\n else:\n new_data = data\n # plot\n plot3d.contour3d(new_data, x_grid, y_grid, z_grid,\n title=title, colormap=colormap, axes=axes, style=style, *args, **kwargs)\n\n elif data_type == 'detector_geometry' or data_type == 'detector_geometry_n':\n # irregular axes\n if not all((self.regular, axis2.regular, axis3.regular)) or \\\n (cartesian_coordinates and not all(type(x) == cartesian.Axis1d for x in (self, axis2, axis3))):\n if interp_size is None:\n interp_size = 50\n warnings.warn(\"Since axes are not regular, linear interpolation with {} points used. 
\"\n \"You can change interpolation size with interp_size attribute.\"\n .format(interp_size ** 3))\n x_grid_n, y_grid_n, z_grid_n = x_grid, y_grid, z_grid\n new_data = np.zeros((data.shape[0], interp_size, interp_size, interp_size))\n # interpolate data for each detector\n print(\"Start interpolation.\")\n mask = None\n for i, d in enumerate(data):\n x_grid, y_grid, z_grid, new_data[i] \\\n = util.geometry3d_basic.make_regular(d, x_grid_n, y_grid_n, z_grid_n, interp_size)\n if mask is None:\n mask = mesh.is_in_grid(self.from_cartesian([x_grid, y_grid, z_grid], axis2, axis3), self, axis2,\n axis3)\n new_data[i] = np.nan_to_num(new_data[i])\n new_data[i] = np.clip(new_data[i], np.amin(data[i]), np.amax(data[i]))\n new_data[i] *= mask\n print('\\r', end='')\n print(\"...\", str((i + 1) * 100 // data.shape[0]) + \"% complete\", end='')\n print('\\r \\r', end='')\n\n else:\n new_data = data\n plot3d.detector_contour3d(new_data, x_grid, y_grid, z_grid,\n title=title, colormap=colormap, axes=axes, style=style, *args, **kwargs)\n else:\n raise ValueError('data type {} is unknown'.format(data_type))\n\n return 0, 0", "def visualize_scene_3D(self, pointcloud, objects, labels=None, calib=None):\n self.figure = mlab.figure(bgcolor=(0,0,0), fgcolor=(1,1,1), size=(1280, 720))\n\n # Point Cloud\n self.visuallize_pointcloud(pointcloud)\n\n # 3D Boxes of model output\n for obj in objects:\n bbox_3d = obj.bbox_3d\n color = self.__get_box_color(obj.label)\n self.visualize_3d_bbox(bbox=bbox_3d, color=color, calib=calib)\n\n self.__draw_text_3D(*bbox_3d.pos, text=str( round(obj.score,2) ), color=color)\n\n # 3D Boxes of dataset labels \n if labels is not None:\n for obj in labels:\n self.visualize_3d_bbox(obj.bbox_3d, (1,1,0), calib)\n\n self.__show_3D()", "def DisplayMesh():\r\n \r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, Vertices, Triangles = CreateMatrixVTK(VTKString)\r\n \r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111,projection = '3d')\r\n ax1.plot_trisurf(Vertices[:,0],Vertices[:,1],Vertices[:,2],triangles= Triangles[:,1:])\r\n ax1.set_zlabel('z')\r\n ax1.set_ylabel('y')\r\n ax1.set_xlabel('x')\r\n plt.show()", "def plot_3D(self, position, legend):\n # Initializing the figure\n fig = plt.figure(figsize=(10, 10))\n ax = fig.gca(projection='3d')\n # Looping over all object arrays in the position matrix,\n # adding it to the plot\n for i in range(self.numbodies):\n ax.plot(position[i, 0, :], position[i, 1, :], position[i, 2, :])\n # Decorating the plot\n ax.set_xlabel('x [AU]', fontsize=16)\n ax.set_ylabel('y [AU]', fontsize=16)\n ax.set_zlabel('z [AU]', fontsize=16)\n ax.set_title('The solar system. \\n %d years from Sep. 
18 2018' \\\n %(self.t), fontsize=24)\n ax.legend(legend, loc=2, fontsize='small')\n plt.axis('equal')", "def display_facet(model_name, vertices, faces, plot_type, display_normals=False, scale=0.2):\n # Separate the coordinates of the vertices\n x = vertices[:, 0]\n y = vertices[:, 1]\n z = vertices[:, 2]\n\n # Display the model\n ax = Axes3D(plt.figure())\n if plot_type == 'Facet':\n ax.plot_trisurf(x, y, z, triangles=faces, color=(1, 1, 1, 1), edgecolor='gray')\n elif plot_type == 'Wireframe':\n ax.plot_trisurf(x, y, z, triangles=faces, color='none', edgecolor='black')\n ax.grid(True)\n set_equal(ax)\n\n ax.set_title(model_name, size='14')\n ax.set_xlabel('X', size='12')\n ax.set_ylabel('Y', size='12')\n ax.set_zlabel('Z', size='12')\n\n # Set the tick label size\n ax.tick_params(labelsize=12)\n\n if display_normals:\n\n # Vector from origin to vertices\n r = zeros([vertices.shape[0], 3])\n\n for i in range(vertices.shape[0]):\n r[i] = [vertices[i][0], vertices[i][1], vertices[i][2]]\n\n for i in range(faces.shape[0]):\n a = r[faces[i][1]] - r[faces[i][0]]\n b = r[faces[i][2]] - r[faces[i][1]]\n\n # Outward normal\n normal = cross(a, b) + 0.\n\n # Scale the size of the arrow to be displayed\n normal *= scale\n\n # Put the arrow at the center of the facet\n mean_r = (r[faces[i][0]] + r[faces[i][1]] + r[faces[i][2]]) / 3.0\n\n # Get the arrow for the normal\n arrow = Arrow3D([mean_r[0], mean_r[0] + normal[0]], [mean_r[1], mean_r[1] + normal[1]],\n [mean_r[2], mean_r[2] + normal[2]], mutation_scale=10, lw=1, arrowstyle=\"-|>\", color=\"r\")\n ax.add_artist(arrow)\n\n plt.show()", "def test_3d_plot(self):\n db = pd.HDFStore('test.h5')\n df_iv = db['iv']\n db.close()\n\n date = pd.to_datetime('2015-04-01')\n self.full_iv.get_data()\n df_date0 = self.full_iv.df_all.query('date == %r' % date)\n df_date1 = df_iv.query('date == %r' % date)\n df_date = pd.concat([df_date0, df_date1])\n \"\"\":type: pd.DataFrame\"\"\"\n\n x = df_date['dte']\n y = df_date['strike']\n z = df_date['impl_vol']\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n # noinspection PyUnresolvedReferences\n ax.plot_trisurf(x, y, z, cmap=cm.jet, linewidth=0.2)\n # ax.plot_wireframe(x, y, z, rstride=1, cstride=1)\n plt.show()", "def plot_3d(x_data, y_data, Z, df, xlabel, ylabel, xrange=None,\n yrange=None, figsize=(12, 12)):\n fig = pyplot.figure(figsize=figsize)\n ax = fig.add_subplot(111, projection='3d')\n nsamp, nsen = Z.shape\n\n sen_index = df.columns.names.index('sensor')\n senlist = df.columns.levels[sen_index]\n pyplot.yticks(y_data, senlist)\n ax.plot_surface(\n np.repeat(x_data,\n nsen, axis=1),\n np.repeat(np.matrix(y_data), nsamp, axis=0),\n df.values,\n cmap=cm.coolwarm)\n pyplot.xlabel(xlabel)\n pyplot.ylabel('Sensor name')\n ax.set_zlabel(ylabel)\n ax.view_init(elev=45., azim=-130)\n ax.tick_params(axis='y', which='major', labelsize=4)\n pyplot.show()", "def plot_3D(Y_data, num_area):\n ref_shape = [Y_data.shape[0], Y_data.shape[1], Y_data.shape[2]]\n fig = plt.figure()\n ax = plt.axes(projection=\"3d\")\n axl = plt.gca()\n axl.set_xlim3d([0, ref_shape[0]])\n axl.set_ylim3d([0, ref_shape[1]])\n axl.set_zlim3d([0, ref_shape[2]])\n\n fig.set_facecolor('black')\n ax.set_facecolor('black')\n ax.grid(False)\n ax.w_xaxis.pane.fill = False\n ax.w_yaxis.pane.fill = False\n ax.w_zaxis.pane.fill = False\n\n ax.set_xlabel('Width', c='white')\n ax.set_ylabel('Depth', c='white')\n ax.set_zlabel('Height', c='white')\n\n for a in np.arange(1, num_area+1):\n loc = np.where(Y_data == a)\n ax.scatter3D(loc[0], loc[1], 
loc[2], marker=\".\", alpha=0.9)\n\n plt.show()", "def D3(self, *args):\n return _Adaptor3d.Adaptor3d_Surface_D3(self, *args)", "def plot_3d_plot(self, features, headers, labels):\n self.master_plot.scatter(features[:, 0], features[:, 1], features[:, 2], c=labels)\n self.master_plot.set_xlabel(headers[0])\n self.master_plot.set_ylabel(headers[1])\n self.master_plot.set_zlabel(headers[2])\n\n plot_hyperplane(self.clf, self.master_plot, colors='orange')", "def get_3d_plot(three_d_matrix, ax, title, length):\r\n x, y, z = np.where(three_d_matrix != 0)\r\n ax.scatter(x, y, z, c='blue')\r\n ax.set_xlabel('x')\r\n ax.set_ylabel('y')\r\n ax.set_xlim(0, length)\r\n ax.set_ylim(0, length)\r\n ax.set_title(title)", "def plot_3D(self, title=None, fig_size=None, close=True):\r\n # TODO ajouter des titres\r\n combs = list(itertools.combinations(np.arange(self.features.shape[1]), 3))\r\n idx_plot = 1\r\n if fig_size is not None:\r\n fig = plt.figure(figsize=fig_size)\r\n else:\r\n fig = plt.figure()\r\n if len(combs) % 2 == 1:\r\n n_col, n_row = (int((len(combs) + 1) / 2), int(len(combs) / 2))\r\n else:\r\n n_col, n_row = (int(len(combs) / 2), int(len(combs) / 2))\r\n for x, y, z in combs:\r\n ax = fig.add_subplot(n_row, n_col, idx_plot, projection='3d')\r\n for target in self.targets:\r\n idx = np.where(self.labels == target)\r\n ax.scatter(self.features[idx, x], self.features[idx, y], self.features[idx, z], label=str(target))\r\n if self.features_names is not None:\r\n ax.set_xlabel(str(self.features_names[x]))\r\n ax.set_ylabel(str(self.features_names[y]))\r\n ax.set_zlabel(str(self.features_names[z]))\r\n if title is not None:\r\n ax.set_title(title[idx_plot - 1])\r\n idx_plot += 1\r\n plt.legend(fontsize='small')\r\n if close:\r\n plt.show()\r\n else:\r\n return fig", "def viewer(\n self, units='nm', \n draw_edges=True, draw_vertices=True,\n color_by='radius'\n ):\n try:\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D \n from matplotlib import cm\n except ImportError:\n print(\"Skeleton.viewer requires matplotlib. Try: pip install matplotlib --upgrade\")\n return\n\n RADII_KEYWORDS = ('radius', 'radii', 'r')\n COMPONENT_KEYWORDS = ('component', 'components', 'c')\n\n fig = plt.figure(figsize=(10,10))\n ax = Axes3D(fig)\n ax.set_xlabel(units)\n ax.set_ylabel(units)\n ax.set_zlabel(units)\n\n # Set plot axes equal. 
Matplotlib doesn't have an easier way to\n # do this for 3d plots.\n X = self.vertices[:,0]\n Y = self.vertices[:,1]\n Z = self.vertices[:,2]\n\n max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max() / 2.0\n\n mid_x = (X.max()+X.min()) * 0.5\n mid_y = (Y.max()+Y.min()) * 0.5\n mid_z = (Z.max()+Z.min()) * 0.5\n ax.set_xlim(mid_x - max_range, mid_x + max_range)\n ax.set_ylim(mid_y - max_range, mid_y + max_range)\n ax.set_zlim(mid_z - max_range, mid_z + max_range)\n ### END EQUALIZATION CODE ###\n\n component_colors = ['k', 'deeppink', 'dodgerblue', 'mediumaquamarine', 'gold' ]\n\n def draw_component(i, skel):\n component_color = component_colors[ i % len(component_colors) ]\n\n if draw_vertices:\n xs = skel.vertices[:,0]\n ys = skel.vertices[:,1]\n zs = skel.vertices[:,2]\n\n if color_by in RADII_KEYWORDS:\n colmap = cm.ScalarMappable(cmap=cm.get_cmap('rainbow'))\n colmap.set_array(skel.radii)\n\n normed_radii = skel.radii / np.max(skel.radii)\n yg = ax.scatter(xs, ys, zs, c=cm.rainbow(normed_radii), marker='o')\n cbar = fig.colorbar(colmap)\n cbar.set_label('radius (' + units + ')', rotation=270)\n elif color_by in COMPONENT_KEYWORDS:\n yg = ax.scatter(xs, ys, zs, color=component_color, marker='.')\n else:\n yg = ax.scatter(xs, ys, zs, color='k', marker='.')\n\n if draw_edges:\n for e1, e2 in skel.edges:\n pt1, pt2 = skel.vertices[e1], skel.vertices[e2]\n ax.plot( \n [ pt1[0], pt2[0] ],\n [ pt1[1], pt2[1] ],\n zs=[ pt1[2], pt2[2] ],\n color=(component_color if not draw_vertices else 'silver'),\n linewidth=1,\n )\n\n if color_by in COMPONENT_KEYWORDS:\n for i, skel in enumerate(self.components()):\n draw_component(i, skel)\n else:\n draw_component(0, self)\n\n plt.show()", "def draw_stl_from_mesh(m):\n plt.ion()\n # Create a new plot\n figure = plt.figure()\n axes = mplot3d.Axes3D(figure)\n\n # Render the cube faces\n #for m in meshes:\n axes.add_collection3d(mplot3d.art3d.Poly3DCollection(m.vectors))\n\n # Auto scale to the mesh size\n scale = m.points.flatten(-1)\n axes.auto_scale_xyz(scale, scale, scale)", "def render_wireframe_3d(self, **kwds):\n wireframe = [];\n for l in self.lines:\n l_coords = self.coordinates_of(l)\n wireframe.append( line3d(l_coords, **kwds))\n for a in self.arrows:\n a_coords = self.coordinates_of(a)\n wireframe.append(arrow3d(a_coords[0], a_coords[1], **kwds))\n return sum(wireframe)", "def plot_results_3d(p_x, p_y, p_z, h_exp = 0.5):\n plt.figure(figsize = (10, 10))\n ax3d = plt.axes(projection = '3d') \n\n color=iter(cm.rainbow(np.linspace(0,1,p_x.shape[0]))) # (1)\n labels = ['Particle ' + str(pl+1) for pl in np.arange(0, p_x.shape[0], step = 1)]\n \n for p in np.arange(0, p_x.shape[0], step = 1): \n c = next(color) # (1)\n for t in np.arange(0, p_x.shape[1], step = 1): \n ax3d.plot3D(p_x[p, t], p_y[p, t], p_z[p, t], 'x', c = c, label = labels[p]) \n legend_without_duplicate_labels(ax3d)\n ax3d.set_xlabel('X (pixels)') \n ax3d.set_ylabel('Y (pixels') \n ax3d.set_zlabel('Z (pixels)') \n ax3d.set_xlim([origin-150,origin+150])\n ax3d.set_ylim([origin-150,origin+150])\n ax3d.set_zlim([origin-150,origin+150])\n ax3d.set_title('3D particle trajectories - H = ' + str(h_exp))", "def plot_3D_compare(true_lab, pred_lab):\n ref_shape = [true_lab.shape[1], true_lab.shape[2], true_lab.shape[3]]\n true_loc = np.where(true_lab == 1)\n pred_loc = np.where(pred_lab == 1)\n fig = plt.figure()\n ax = plt.axes(projection=\"3d\")\n axl = plt.gca()\n axl.set_xlim3d([0, ref_shape[0]])\n axl.set_ylim3d([0, ref_shape[1]])\n axl.set_zlim3d([0, ref_shape[2]])\n 
ax.scatter3D(true_loc[0], true_loc[1], true_loc[2], marker=\".\", alpha=0.9)\n ax.scatter3D(pred_loc[0], pred_loc[1], pred_loc[2], marker=\".\", alpha=0.05)\n\n fig.set_facecolor('black')\n ax.set_facecolor('black')\n ax.grid(False)\n ax.w_xaxis.pane.fill = False\n ax.w_yaxis.pane.fill = False\n ax.w_zaxis.pane.fill = False\n\n ax.set_xlabel('Width', c='white')\n ax.set_ylabel('Depth', c='white')\n ax.set_zlabel('Height', c='white')\n\n plt.show()", "def visualize_shape_model_3d(\n shape_model, n_parameters=5, mode=\"multiple\", parameters_bounds=(-15.0, 15.0)\n):\n # Make sure that shape_model is a list even with one member\n if not isinstance(shape_model, list):\n shape_model = [shape_model]\n\n # Get the number of levels (i.e. number of shape models)\n n_levels = len(shape_model)\n\n # Check if the model is TriMesh or any other 3D shape class\n is_trimesh = isinstance(shape_model[0].template_instance, TriMesh)\n\n # Define the styling options\n main_style = \"warning\"\n\n # Get the maximum number of components per level\n max_n_params = [sp.n_active_components for sp in shape_model]\n\n # Check the given number of parameters (the returned n_parameters is a list\n # of len n_scales)\n n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)\n\n output = ipywidgets.Output()\n\n @output.capture(clear_output=True, wait=True)\n def render_function(change):\n save_figure_wid.renderer.clear_figure()\n\n # Get selected level\n level = 0\n if n_levels > 1:\n level = level_wid.value\n\n # Compute weights\n parameters = model_parameters_wid.selected_values\n weights = parameters * shape_model[level].eigenvalues[: len(parameters)] ** 0.5\n\n # Compute instance\n instance = shape_model[level].instance(weights)\n\n # Create options dictionary\n options = dict()\n if is_trimesh:\n options.update(shape_options_wid.selected_values)\n else:\n options.update(shape_options_wid.selected_values[\"lines\"])\n options.update(shape_options_wid.selected_values[\"markers\"])\n options.update(renderer_options_wid.selected_values[\"numbering_mayavi\"])\n # Correct options based on the type of the shape\n if hasattr(instance, \"labels\"):\n # If the shape is a LabelledPointUndirectedGraph ...\n # ...use with_labels\n options[\"with_labels\"] = shape_options_wid.selected_values[\n \"with_labels\"\n ]\n # ...correct colours\n line_colour = []\n marker_colour = []\n for lbl in options[\"with_labels\"]:\n idx = instance.labels.index(lbl)\n line_colour.append(options[\"line_colour\"][idx])\n marker_colour.append(options[\"marker_colour\"][idx])\n options[\"line_colour\"] = line_colour\n options[\"marker_colour\"] = marker_colour\n else:\n # If shape is PointCloud, TriMesh or PointGraph\n # ...correct colours\n options[\"line_colour\"] = options[\"line_colour\"][0]\n options[\"marker_colour\"] = options[\"marker_colour\"][0]\n\n # Update info\n update_info(level, instance.range())\n\n # Render instance\n save_figure_wid.renderer = instance.view(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False, **options\n )\n\n # Force rendering\n save_figure_wid.renderer.force_draw()\n\n # Define function that updates the info text\n def update_info(level, instance_range):\n text_per_line = [\n \"> Level {} out of {}\".format(level + 1, n_levels),\n \"> {} components in total\".format(shape_model[level].n_components),\n \"> {} active components\".format(shape_model[level].n_active_components),\n \"> {:.1f}% variance kept\".format(shape_model[level].variance_ratio() * 100),\n \"> Instance range: {:.1f} x 
{:.1f}\".format(\n instance_range[0], instance_range[1]\n ),\n \"> {} points\".format(shape_model[level].mean().n_points),\n ]\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n @output.capture(clear_output=True, wait=True)\n def plot_variance(name):\n # Get selected level\n level = level_wid.value if n_levels > 1 else 0\n\n # Render\n plt.subplot(121)\n shape_model[level].plot_eigenvalues_ratio()\n plt.subplot(122)\n shape_model[level].plot_eigenvalues_cumulative_ratio()\n plt.show()\n\n # Create widgets\n model_parameters_wid = LinearModelParametersWidget(\n n_parameters[0],\n render_function,\n params_str=\"Parameter \",\n mode=mode,\n params_bounds=parameters_bounds,\n params_step=0.1,\n plot_variance_visible=True,\n plot_variance_function=plot_variance,\n animation_step=0.5,\n interval=0.0,\n loop_enabled=True,\n continuous_update=False,\n )\n if is_trimesh:\n shape_options_wid = Mesh3DOptionsWidget(\n textured=False, render_function=render_function\n )\n else:\n labels = None\n if hasattr(shape_model[0].mean(), \"labels\"):\n labels = shape_model[0].mean().labels\n shape_options_wid = Shape3DOptionsWidget(\n labels=labels, render_function=render_function\n )\n renderer_options_wid = RendererOptionsWidget(\n options_tabs=[\"numbering_mayavi\"],\n labels=None,\n render_function=render_function,\n )\n info_wid = TextPrintWidget(text_per_line=[\"\"])\n save_figure_wid = SaveMayaviFigureOptionsWidget()\n\n # Group widgets\n if n_levels > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n model_parameters_wid.set_widget_state(\n n_parameters=n_parameters[change[\"new\"]],\n params_str=\"Parameter \",\n allow_callback=True,\n )\n\n # Create pyramid radiobuttons\n radio_str = OrderedDict()\n for l in range(n_levels):\n if l == 0:\n radio_str[\"Level {} (low)\".format(l)] = l\n elif l == n_levels - 1:\n radio_str[\"Level {} (high)\".format(l)] = l\n else:\n radio_str[\"Level {}\".format(l)] = l\n level_wid = ipywidgets.RadioButtons(\n options=radio_str,\n description=\"Pyramid\",\n value=n_levels - 1,\n layout=ipywidgets.Layout(width=\"6cm\"),\n )\n level_wid.observe(update_widgets, names=\"value\", type=\"change\")\n level_wid.observe(render_function, names=\"value\", type=\"change\")\n tmp_wid = ipywidgets.HBox([level_wid, model_parameters_wid])\n else:\n tmp_wid = ipywidgets.HBox(children=[model_parameters_wid])\n if is_trimesh:\n options_box = ipywidgets.Tab(\n children=[tmp_wid, shape_options_wid, info_wid, save_figure_wid]\n )\n tab_titles = [\"Model\", \"Mesh\", \"Info\", \"Export\"]\n else:\n options_box = ipywidgets.Tab(\n children=[\n tmp_wid,\n shape_options_wid,\n renderer_options_wid,\n info_wid,\n save_figure_wid,\n ]\n )\n tab_titles = [\"Model\", \"Shape\", \"Renderer\", \"Info\", \"Export\"]\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = \"0px 10px 0px 0px\"\n\n output.layout.align_self = \"center\"\n wid = ipywidgets.VBox([logo_wid, options_box, output])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = \"2px solid \" + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = \"flex\"\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n print_dynamic(\"\")" ]
[ "0.699979", "0.6854899", "0.6696654", "0.65606564", "0.65247005", "0.64862555", "0.64855486", "0.64444196", "0.64396155", "0.63991827", "0.6364463", "0.62895536", "0.6284521", "0.62425154", "0.62332267", "0.6223329", "0.6215087", "0.60715675", "0.60688156", "0.60422426", "0.5999724", "0.5986411", "0.5985341", "0.59687513", "0.5943963", "0.5940368", "0.5920948", "0.59181726", "0.5893476", "0.5892096" ]
0.79453415
0
Given the object geometry, determine neighbors for each surface
def get_neighbors(object_): # Initialize neighbors dictionary neighbors = dict() # For each surface in object dictionary for surf in object_: # Selected surface current_surface = object_[surf][0] # Surface normal current_normal = object_[surf][1] # Rest of the surfaces dummy_surfaces = object_.copy() dummy_surfaces.pop(surf) # Initialize nested dictionary for selected surface neighbors[surf] = dict() # For each candidate surface (from rest of the surfaces) for n in dummy_surfaces: # Candidate surface candidate_surface = dummy_surfaces[n][0] # Candidate normal candidate_normal = dummy_surfaces[n][1] # Check if there is an intersection - there should be a line intersection if neighboring surfaces check = intersection(current_surface,candidate_surface) if check is not None: # Intersection vector rotation_axis = candidate_normal.cross(current_normal) # Angle between surfaces angle = candidate_normal.angle(current_normal) # Corner position axis_position = gPoint((check[0].x+check[1].x)/2,(check[0].y+check[1].y)/2,(check[0].z+check[1].z)/2) neighbors[surf][n] = ((candidate_surface,rotation_axis,axis_position,angle)) return neighbors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def neighbors(self):\n return self.mesh.neighbors()", "def _get_single_direction_neighbors(object_idx, ui_v_dist, ui_h_dist):\n neighbor_dict = {}\n vertical_dist = ui_v_dist[object_idx]\n horizontal_dist = ui_h_dist[object_idx]\n bottom_neighbors = np.array([\n idx for idx in range(len(vertical_dist)) if vertical_dist[idx] > 0 and\n abs(horizontal_dist[idx]) < config.NORM_HORIZONTAL_NEIGHBOR_MARGIN\n ])\n top_neighbors = np.array([\n idx for idx in range(len(vertical_dist)) if vertical_dist[idx] < 0 and\n abs(horizontal_dist[idx]) < config.NORM_HORIZONTAL_NEIGHBOR_MARGIN\n ])\n right_neighbors = np.array([\n idx for idx in range(len(horizontal_dist)) if horizontal_dist[idx] > 0 and\n abs(vertical_dist[idx]) < config.NORM_VERTICAL_NEIGHBOR_MARGIN\n ])\n left_neighbors = np.array([\n idx for idx in range(len(horizontal_dist)) if horizontal_dist[idx] < 0 and\n abs(vertical_dist[idx]) < config.NORM_VERTICAL_NEIGHBOR_MARGIN\n ])\n\n if bottom_neighbors.size:\n neighbor_dict[NeighborContextDesc.TOP] = bottom_neighbors[np.argmin(\n vertical_dist[bottom_neighbors])]\n if top_neighbors.size:\n neighbor_dict[NeighborContextDesc.BOTTOM] = top_neighbors[np.argmax(\n vertical_dist[top_neighbors])]\n if right_neighbors.size:\n neighbor_dict[NeighborContextDesc.LEFT] = right_neighbors[np.argmin(\n horizontal_dist[right_neighbors])]\n if left_neighbors.size:\n neighbor_dict[NeighborContextDesc.RIGHT] = left_neighbors[np.argmax(\n horizontal_dist[left_neighbors])]\n\n return neighbor_dict", "def neighbours(self):\n\n neighbours = []\n root = self.root\n if self == root:\n return neighbours\n\n ########################\n # IMMEDIATELY ADJACENT #\n sizes = [self.maxs[0] - self.mins[0], self.maxs[1] - self.mins[1]]\n coords = [(self.mins[0] + sizes[0] / 2, self.maxs[1] + sizes[1] / 2,),\n (self.maxs[0] + sizes[0] / 2, self.mins[1] + sizes[1] / 2,),\n (self.mins[0] + sizes[0] / 2, self.mins[1] - sizes[1] / 2,),\n (self.maxs[0] - sizes[0] / 2, self.mins[1] + sizes[1] / 2,),]\n # loop through top, right, bottom, left\n for i in range(4):\n x, y = coords[i]\n query_quad = root.query_xy(x, y)\n if query_quad is not None:\n same_size_idx = query_quad.location[: self.tree_depth]\n same_size_quad = root[same_size_idx]\n neighbours += list(self._get_border_children(same_size_quad, i))\n\n #############\n # DIAGONALS #\n root_sizes = [root.maxs[0] - root.mins[0], root.maxs[1] - root.mins[1]]\n xs, ys = (root_sizes / 2 ** root.max_tree_depth) / 2\n neighbours += [\n root.query_xy(self.mins[0] - xs, self.mins[1] - ys), # TL\n root.query_xy(self.maxs[0] + xs, self.mins[1] - ys), # TR\n root.query_xy(self.mins[0] - xs, self.maxs[1] + ys), # BL\n root.query_xy(self.maxs[0] + xs, self.maxs[1] + ys), # BR\n ]\n\n unique_neighbours = list(set(neighbours))\n try:\n unique_neighbours.remove(self)\n except ValueError:\n pass\n\n return unique_neighbours", "def neighbors(self):\n adjacency_matrix = self.polyhedron().facet_adjacency_matrix()\n for x in self.polyhedron().Hrep_generator():\n if adjacency_matrix[self.index(), x.index()] == 1:\n yield x", "def neighbors(self):\n adjacency_matrix = self.polyhedron().facet_adjacency_matrix()\n for x in self.polyhedron().Hrep_generator():\n if adjacency_matrix[self.index(), x.index()] == 1:\n yield x", "def neighbors(self, node_object):\n\n (node_column, node_row) = node_object\n row_flags = numpy.logical_and(\n self.row_indices_in_region >= node_row - 1,\n self.row_indices_in_region <= node_row + 1)\n column_flags = numpy.logical_and(\n self.column_indices_in_region >= node_column - 
1,\n self.column_indices_in_region <= node_column + 1)\n\n neighbour_indices = numpy.where(\n numpy.logical_and(row_flags, column_flags))[0]\n neighbour_indices = neighbour_indices.tolist()\n\n node_index = numpy.where(numpy.logical_and(\n self.row_indices_in_region == node_row,\n self.column_indices_in_region == node_column))[0][0]\n neighbour_indices.remove(node_index)\n\n return [(self.column_indices_in_region[i],\n self.row_indices_in_region[i]) for i in neighbour_indices]", "def neighbors(node, topology):\n return [n for n in topology[node]]", "def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours", "def neighbors(self):\n \n # find 0 - blank square\n \n x0 = None\n y0 = None\n \n for i in range(4):\n for j in range(4):\n if self.get_tile(i,j) == 0:\n y0 = i\n x0 = j\n\n if x0 == None or y0 == None:\n return []\n \n neighbor_list = []\n \n # move 0 to the right\n if x0 < 3:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0+1)\n new_position.set_tile(y0,x0+1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'r'\n neighbor_list.append(new_position)\n # move 0 to the left\n if x0 > 0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0-1)\n new_position.set_tile(y0,x0-1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'l'\n neighbor_list.append(new_position)\n # move 0 up\n if y0 > 0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0-1,x0)\n new_position.set_tile(y0-1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'u'\n neighbor_list.append(new_position)\n # move 0 down\n if y0 < 3:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0+1,x0)\n new_position.set_tile(y0+1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'd'\n neighbor_list.append(new_position)\n \n return neighbor_list", "def get_neighbours(self):\n shape=self.cubeshape[1:]\n neighboursx=np.arange(self.xpos-(self.blocksize-1)/2,(self.xpos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursx=[x if (x>=0) & (x<=shape[1]-1) else np.nan for x in neighboursx ]\n neighboursy=np.arange(self.ypos-(self.blocksize-1)/2,(self.ypos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursy=[y if (y>=0) & (y<=shape[0]-1) else np.nan for y in neighboursy ]\n keys=[np.ravel_multi_index([y,x], shape) if np.all(np.isfinite(np.asarray([y,x]))) else np.nan for y in neighboursy for x in neighboursx]\n\n return keys", "def find_neighbours(engine, field, features):\n code = CodeSegment(engine)\n N = len(engine.q)\n Nf = 3 ** engine.pm.ndim\n code.assign(x=Literal(numpy.zeros((N, Nf))), y='features')\n grid = engine.pm.generate_uniform_particle_grid(shift=0)\n for i in range(Nf):\n ii = i\n a = []\n for d in range(engine.pm.ndim):\n a.append(ii % 3 - 1)\n ii //= 3\n\n grid1 = grid + numpy.array(a[::-1]) * (engine.pm.BoxSize / engine.pm.Nmesh)\n layout = engine.pm.decompose(grid1)\n code.readout(x=Literal(grid1), mesh='field', value='feature1', layout=Literal(layout), resampler='nearest')\n code.assign_component(attribute='features', value='feature1', dim=i)\n return code", "def get_neighbors(y, x, H, W):\n neighbors = []\n\n for i in (y-1, y, y+1):\n for j in (x-1, x, x+1):\n if i >= 0 
and i < H and j >= 0 and j < W:\n if (i == y and j == x):\n continue\n neighbors.append((i, j))\n\n return neighbors", "def get_neighbors(y, x, H, W):\n neighbors = []\n\n for i in (y-1, y, y+1):\n for j in (x-1, x, x+1):\n if i >= 0 and i < H and j >= 0 and j < W:\n if (i == y and j == x):\n continue\n neighbors.append((i, j))\n\n return neighbors", "def neighbors(self):\n adjacency_matrix = self.polyhedron().vertex_adjacency_matrix()\n for x in self.polyhedron().Vrep_generator():\n if adjacency_matrix[self.index(), x.index()] == 1:\n yield x", "def neighbors(self):\n adjacency_matrix = self.polyhedron().vertex_adjacency_matrix()\n for x in self.polyhedron().Vrep_generator():\n if adjacency_matrix[self.index(), x.index()] == 1:\n yield x", "def test_neighbors():\n world, bodies, sample_configuration = example_world()\n\n geometry = metis.geometry.ManyShapeGeometry(world, bodies)\n dynamics = metis.dynamics.MagneticDynamics(bodies)\n configurations = {(name, None): [value,]\n for name, value in sample_configuration.iteritems()}\n factored_graph = FactoredRandomGeometricGraph(\n geometry, dynamics, default_count=100,\n blacklist=NoObjectContactBlacklist(), configurations=configurations)\n\n vertex = factored_graph.nearest(sample_configuration)\n neighbors = list(factored_graph.neighbors(vertex))\n\n duplicates = set()\n seen = set()\n for neighbor in neighbors:\n if neighbor in seen:\n duplicates.add(neighbor)\n else:\n seen.add(neighbor)\n assert len(duplicates) == 0, (\n \"Neighbors should be unique: had duplicate elements\"\n \"\\n\\t\".join(str(d) for d in duplicates)\n )\n\n assert any(v['robot'][0] is not None for v in neighbors), graphical_debug(\n \"There should be at least one neighbor with an object in its grasp\",\n lambda ax: draw_polygons(ax, {\n name: metis.geometry.shapely_from_box2d_body(bodies[name], pose)\n for name, pose in factored_graph[vertex].iteritems()}))\n\n grasping = next(v for v in neighbors if v['robot'][0] is not None)\n neighbors = list(factored_graph.neighbors(grasping))\n assert any(v['robot'][0] is None for v in neighbors), graphical_debug(\n \"There should be at least one neighbor without an object in its grasp\",\n lambda ax: draw_polygons(ax, {\n name: metis.geometry.shapely_from_box2d_body(bodies[name], pose)\n for name, pose in factored_graph[grasping].iteritems()}))", "def get_neighbors(self, row, col):\n neighbors = set()\n for d in [-1,1]:\n if row+d >= 0 and row+d < self._height and \\\n (row+d,col) in self._empty_spaces:\n neighbors.add((row+d,col))\n if col+d >= 0 and col+d < self._width and \\\n (row,col+d) in self._empty_spaces:\n neighbors.add((row,col+d))\n return neighbors", "def find_neighbors(self, room):\r\n # options to go to find neighbors\r\n delta = [('W', (-1, 0)),\r\n ('E', (1, 0)),\r\n ('S', (0, 1)),\r\n ('N', (0, -1))]\r\n neighbors = []\r\n for direction, (dx, dy) in delta:\r\n x2, y2 = room.x + dx, room.y + dy\r\n if (0 <= x2 < self.__nx) and (0 <= y2 < self.__ny):\r\n neighbour = self.room_at(x2, y2)\r\n if neighbour.has_all_walls():\r\n neighbors.append((direction, neighbour))\r\n return neighbors", "def check_neighbours(self, grid):\n if self.bomba:\n self.bombs_around = -1\n return\n\n total = 0\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):\n neighbor = grid[i][j]\n\n if neighbor.bomba:\n total += 1\n \n self.bombs_around = total", "def _find_neighbours(self):\n\n neighbours = []\n for i, p in enumerate(self.frame_0):\n 
nearests = np.where(np.linalg.norm(self.frame_0 - p, axis=1) <= self.R_n)[0]\n # delete self index\n index = np.argwhere(nearests==i)\n nearests = np.delete(nearests, index)\n neighbours.append(nearests)\n\n return neighbours", "def find_neighbors(self):\n #checked#\n ###your code here###\n for address in self.homes:\n for i in range(-1, 2):\n for j in range(-1,2):\n neighbor_address=(address[0]+i, address[1]+j)\n if neighbor_address in self.homes and neighbor_address!=address:\n self.homes[address].neighbors.append(self.homes[neighbor_address])", "def pd_king_neighbors_and_self(obj: PdObject) -> List[Tuple[bool, Any]]:\n\n if isinstance(obj, Block):\n raise TypeError('Cannot compute neighbors of block ' + repr(obj))\n if isinstance(obj, (Char, int, float, complex)):\n return [\n (True, num.pd_add_const(obj, -1)),\n (False, obj),\n (True, num.pd_add_const(obj, 1)),\n ]\n elif len(obj) == 0:\n return [(False, obj)]\n elif isinstance(obj, str):\n # type juggling is actually kind of annoying, just doing it explicitly\n return [\n (any(deltas), ''.join(chr(ord(ch) + delta) for ch, delta in zip(obj, deltas)))\n for deltas in itertools.product([-1, 0, 1], repeat=len(obj))\n ]\n else:\n xs = pd_to_list(obj)\n\n return [\n (tag or tag2, [neighbor] + neighbors)\n for tag, neighbor in pd_king_neighbors_and_self(xs[0])\n for tag2, neighbors in pd_king_neighbors_and_self(xs[1:])\n ]", "def element_neighbors(self):\n if self.element_neighbors_data is not None:\n return self.element_neighbors_data\n\n max_nr_edges = self.header['element_infos'][0, 2]\n\n # initialize the neighbor array\n self.element_neighbors_data = []\n self.element_neighbors_edges = []\n\n # determine neighbors\n print('Looking for neighbors')\n time_start = time.time()\n for nr, element_nodes in enumerate(self.elements):\n # print('element {0}/{1}'.format(nr + 1, self.nr_of_elements))\n # print(element_nodes)\n neighbors = []\n neighbors_edges = [] # store the edges to this neighbor\n for nr1, el in enumerate(self.elements):\n # we look for elements that have two nodes in common with this\n # element\n intersection = np.intersect1d(element_nodes, el)\n if intersection.size == 2:\n neighbors.append(nr1)\n neighbors_edges.append(intersection)\n # stop if we reached the maximum number of possible edges\n # this saves us quite some loop iterations\n if len(neighbors) == max_nr_edges:\n break\n self.element_neighbors_data.append(neighbors)\n self.element_neighbors_edges.append(neighbors_edges)\n time_end = time.time()\n print('elapsed time: {} s'.format(time_end - time_start))\n return self.element_neighbors_data", "def pd_king_neighbors(obj: PdObject) -> List[PdObject]:\n return [neighbor for tag, neighbor in pd_king_neighbors_and_self(obj) if tag]", "def find_edges(self):\n self.edges = [deepcopy(self.grid[0]), [], deepcopy(self.grid[-1]), []]\n for g in self.grid:\n self.edges[3].append(g[0])\n self.edges[1].append(g[-1])\n self.edges[2]\n self.edges[3]", "def peridym_compute_neighbors(mesh, horizon, struct_grd=False):\n start = tm.default_timer()\n print(\"computing the neighbor list of the mesh for horizon size of %f\" %horizon)\n neighbor_lst = []\n\n if(struct_grd):\n cell_cent = structured_cell_centroids(mesh)\n else:\n cell_cent = get_cell_centroids(mesh)\n \n num_cells = len(cell_cent)\n for i in range(num_cells):\n curr_dist = 0.0\n curr_neighbor_lst = []\n\n for j in range(i):\n curr_dist = la.norm(cell_cent[i] - cell_cent[j],2)\n if curr_dist <= horizon : \n curr_neighbor_lst.append(j) # appending the element ID to 
neighbor_list\n\n for j in range(i+1, num_cells):\n curr_dist = la.norm(cell_cent[j] - cell_cent[i],2)\n if curr_dist <= horizon : \n curr_neighbor_lst.append(j) # appending the element ID to neighbor_list\n\n neighbor_lst.append(np.array(curr_neighbor_lst))\n print(\"time taken for computation of naive neighbor list for the given mesh is %4.3f sec\"%(tm.default_timer()-start))\n return np.array(neighbor_lst)", "def neighbors(self, x):\n pass", "def __neighbors(self, x, y):\n if (x > 0) and not self.is_wall(x - 1, y):\n yield x - 1, y\n if (x < self.width - 1) and not self.is_wall(x + 1, y):\n yield x + 1, y\n if (y > 0) and not self.is_wall(x, y - 1):\n yield x, y - 1\n if (y < self.height - 1) and not self.is_wall(x, y + 1):\n yield x, y + 1", "def get_neighbors(grid, x, y):\n out = []\n if x > 0:\n out.append(grid[x-1, y])\n if y > 0:\n out.append(grid[x, y-1])\n if y < grid.shape[1] - 1:\n out.append(grid[x, y+1])\n if x < grid.shape[0] - 1:\n out.append(grid[x+1, y])\n return out", "def get_neighbours(lat, long):\n # ns = north east, ew = east west (ratio between 1 feet and degree) \n # its different on diferent places on earth (sphere)!!\n ns = 0.0025\n ew = 0.0025\n walk = []\n for i in range(-2, 3):\n for j in range(-2, 3):\n thiscell = CellId.from_lat_lng(LatLng.from_degrees(lat + ns*i, long + ew*j)).parent(S2_CELL_LEVEL)\n if abs(i * j) < 4:\n walk.append(thiscell.id())\n return sorted(walk)" ]
[ "0.70040745", "0.6541992", "0.64760965", "0.63982475", "0.63982475", "0.6391575", "0.6362306", "0.63613594", "0.63120025", "0.6269004", "0.6226812", "0.62244326", "0.62244326", "0.62144446", "0.62144446", "0.6202256", "0.61859506", "0.61818993", "0.6158315", "0.6152837", "0.61285466", "0.6111063", "0.60541326", "0.60434765", "0.6031088", "0.60221714", "0.60216177", "0.60209227", "0.5993906", "0.598994" ]
0.73999304
0
Given the object surface dict and neighbors dict, generate an unfolded surface for the desired surface, centered on a selected surface
def unfold_surface(surface_dict,neighbors_dict,surf_idx,other,neighbor,show=False): # Visualization p = Renderer() p.add((surface_dict[surf_idx][0],'r',1)) # Normal of the center surface current_normal = surface_dict[surf_idx][1] # Normal of the neighboring surface candidate_normal = other[1] # Angle between surfaces angle = candidate_normal.angle(current_normal) # Rotation calculations (Finding transformation matrix) A,B,C = neighbor[1] L = np.sqrt(A**2 + B**2 + C**2) V = np.sqrt(B**2 + C**2) D = np.array([[1,0,0,-neighbor[2][0]],[0,1,0,-neighbor[2][1]],[0,0,1,-neighbor[2][2]],[0,0,0,1]]) if V == 0: R_x = np.eye(4) else: R_x = np.array([[1,0,0,0],[0,C/V,-B/V,0],[0,B/V,C/V,0],[0,0,0,1]]) R_y = np.array([[V/L,0,-A/L,0],[0,1,0,0],[A/L,0,V/L,0],[0,0,0,1]]) R_z = np.array([[np.cos(angle),-np.sin(angle),0,0], [np.sin(angle),np.cos(angle),0,0], [0,0,1,0],[0,0,0,1]]) T = np.linalg.inv(D)@np.linalg.inv(R_x)@np.linalg.inv(R_y)@R_z@R_y@R_x@D # Applying transformation P_init = np.empty((4,0)) for point in other[0].points: point_vec = np.array([point.x,point.y,point.z,1]).reshape(4,1) P_init = np.concatenate([P_init,point_vec],axis=1) normal_vec = np.array([other[0].points[-1].x+candidate_normal[0],other[0].points[-1].y+candidate_normal[1],other[0].points[-1].z+candidate_normal[2],1]).reshape(4,1) P_init = np.concatenate([P_init,normal_vec],axis=1) P_final = T@P_init new_points = list() for i in range(P_final.shape[1]-1): new_points.append(gPoint(np.round(P_final[:3,i],decimals=3))) # New normal vector nnormal = Vector(P_final[:3,-1]-P_final[:3,-2]) # New surface definition as convex polygon nsurf = ConvexPolygon((new_points)) # Transform goal region as well if len(other)>2: G_init = np.empty((4,0)) for point in other[2].points: point_vec = np.array([point.x,point.y,point.z,1]).reshape(4,1) G_init = np.concatenate([G_init,point_vec],axis=1) G_final = T@G_init new_goal = list() for i in range(G_final.shape[1]): new_goal.append(gPoint(np.round(G_final[:3,i],decimals=3))) ngoal = ConvexPolygon((new_goal)) p.add((nsurf,'k',1)) p.add((ngoal,'k',1)) if show: p.add((other[0],'k',1)) p.show() return (nsurf,nnormal,ngoal) else: p.add((nsurf,'k',1)) if show: p.add((other[0],'k',1)) p.show() return (nsurf,nnormal)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unfold_object(current_surface,surface_dict,neighbors_dict):\n\n # Initialize a dictionary for the unfolded surfaces\n unfolded_surfaces = dict()\n\n # Generate a list of surface numbers\n surface_list = list(surface_dict.keys())\n\n # Add center surface to dictionary without modification\n unfolded_surfaces[current_surface] = surface_dict[current_surface]\n\n # Remove center surface from surface numbers list\n surface_list.remove(current_surface)\n\n # Generate an open list that contains the neighboring surface numbers to the center surface\n open_list = [(neighbor,[current_surface]) for neighbor in neighbors_dict[current_surface]]\n\n # Run until all surfaces are added to the unfolding\n while len(surface_list) > 0:\n\n # Next item in open list\n item = open_list[0]\n\n # Initialize a list of parents\n parents = list()\n\n # If neighbor is not already added\n if item[0] in surface_list:\n # Get folded version of the surface\n folded_surf = surface_dict[item[0]]\n child = item[0]\n\n # while there are still other parents of neighboring surface \n # (when more than one unfolding is required for a neighbor of the neighbor)\n while len(item[1])>0:\n # get parent surface no\n parent = item[1][-1]\n # unfold surface on the parent\n folded_surf = unfold_surface(surface_dict,neighbors_dict,item[1][-1],folded_surf,neighbors_dict[parent][child])\n # parent becomes the child\n child = item[1][-1]\n # remove parent and add to the parents list\n parents.append(item[1].pop())\n\n unfolded_surf = folded_surf\n\n else:\n open_list.pop(0)\n continue\n\n unfolded_surfaces[item[0]] = unfolded_surf\n \n surface_list.remove(item[0])\n\n parents.reverse()\n # for neighbors of the current neighbor\n for neighbor in neighbors_dict[item[0]]:\n # if not already added\n if neighbor in surface_list:\n # add to the open list with correct parenting order\n open_list.append((neighbor,parents+[item[0]]))\n else:\n continue\n open_list.pop(0)\n return unfolded_surfaces", "def get_neighbors(object_):\n\n # Initialize neighbors dictionary\n neighbors = dict()\n\n # For each surface in object dictionary\n for surf in object_:\n\n # Selected surface\n current_surface = object_[surf][0]\n\n # Surface normal\n current_normal = object_[surf][1]\n\n # Rest of the surfaces\n dummy_surfaces = object_.copy()\n dummy_surfaces.pop(surf)\n\n # Initialize nested dictionary for selected surface\n neighbors[surf] = dict()\n\n # For each candidate surface (from rest of the surfaces)\n for n in dummy_surfaces:\n # Candidate surface\n candidate_surface = dummy_surfaces[n][0]\n # Candidate normal\n candidate_normal = dummy_surfaces[n][1]\n # Check if there is an intersection - there should be a line intersection if neighboring surfaces\n check = intersection(current_surface,candidate_surface)\n if check is not None:\n # Intersection vector\n rotation_axis = candidate_normal.cross(current_normal)\n # Angle between surfaces\n angle = candidate_normal.angle(current_normal)\n # Corner position\n axis_position = gPoint((check[0].x+check[1].x)/2,(check[0].y+check[1].y)/2,(check[0].z+check[1].z)/2)\n neighbors[surf][n] = ((candidate_surface,rotation_axis,axis_position,angle))\n return neighbors", "def __init__(self, initial_surfaces):\n\n nsurf = len(initial_surfaces)\n nvert, nedge, ngroup, \\\n surf_ptrs, edge_ptrs, \\\n surf_group, edge_group \\\n = self._compute_topology(initial_surfaces)\n\n self._num = {\n 'surf': len(initial_surfaces),\n 'vert': nvert,\n 'edge': nedge,\n 'group': ngroup,\n }\n\n self._topo = {\n 'surf_ptrs': 
surf_ptrs,\n 'edge_ptrs': edge_ptrs,\n 'surf_group': surf_group,\n 'edge_group': edge_group,\n }\n\n self._mult = {\n 'vert': numpy.zeros(nvert, int),\n 'edge': numpy.zeros(nedge, int),\n 'diff_vert': numpy.zeros(nvert, int),\n 'diff_edge': numpy.zeros(nedge, int),\n }\n\n self._bspline = {\n 'order': 4 * numpy.ones(ngroup, int),\n 'num_cp': 4 * numpy.ones(ngroup, int),\n 'num_pt': 10 * numpy.ones(ngroup, int),\n }\n\n self._surf_indices = {\n 'df': numpy.zeros((nsurf, 2), int, 'F'),\n 'cp': numpy.zeros((nsurf, 2), int, 'F'),\n 'pt': numpy.zeros((nsurf, 2), int, 'F'),\n }\n\n self._edge_indices = {\n 'df': numpy.zeros((nedge, 2), int, 'F'),\n 'cp': numpy.zeros((nedge, 2), int, 'F'),\n 'pt': numpy.zeros((nedge, 2), int, 'F'),\n }\n\n self._str_indices = {\n 'cp': numpy.zeros((nsurf, 2), int, 'F'),\n 'pt': numpy.zeros((nsurf, 2), int, 'F'),\n }\n\n self._vert_indices = numpy.zeros(nvert, int)\n\n self._size = {\n 'df_str': 0,\n 'df': 0,\n 'cp': 0,\n 'cp_str': 0,\n 'pt_str': 0,\n 'pt': 0,\n }\n\n self.diff = {\n 'surf': numpy.zeros((nsurf, 3, 3), bool, 'F'),\n 'edge': numpy.zeros((nedge, 2), bool, 'F'),\n }\n\n self.hidden = numpy.zeros(nsurf, bool)\n\n self.jac = {\n 'd(df)/d(df_str)': None,\n 'd(cp)/d(df)': None,\n 'd(cp_str)/d(cp)': None,\n 'd(pt_str)/d(cp_str)': None,\n 'd(pt)/d(pt_str)': None,\n }\n\n self.vec = {\n 'df_str': None,\n 'df': None,\n 'cp': None,\n 'cp_str': None,\n 'pt_str': None,\n 'pt': None,\n }", "def py_SurfStatInflate(surf, w=0.5, spherefile=None):\n \n v = surf['coord'].shape[1]\n \n if v <= 81924:\n # MATLAB RAPPING FOR *obj FILE READ IN --> has to be changed...\n if spherefile is None:\n spherefile = 'sphere.obj'\n sphere_mat = eng.SurfStatReadSurf(spherefile)\n sphere = {}\n sphere['tri'] = np.array(sphere_mat['tri']) \n sphere['coord'] = np.array(sphere_mat['coord'])\n \n if v == 81924:\n sphere['tri'] = np.concatenate((sphere['tri'],\n sphere['tri']+v), axis=1)\n col1 = sphere['coord'][0,:] * (sphere['coord'][0,:] < 0)\n col2 = -1 *sphere['coord'][0,:] * (sphere['coord'][0,:] < 0)\n x = np.concatenate((col1,col2))\n x = x.reshape(1, len(x))\n row2 = row3 = sphere['coord'][1:3,:]\n y = np.concatenate((row2,row3), axis=1)\n sphere['coord'] = np.concatenate((x,y))\n else:\n if surf['coord'][0,:].mean()/abs(surf['coord'][0,:]).mean() <-0.5:\n row1 = sphere['coord'][0,:] * (sphere['coord'][0,:] < 0)\n row1 = row1.reshape(1, len(row1))\n sphere['coord'] = np.concatenate((row1,\n sphere['coord'][1:3,:]))\n else:\n row1 = -sphere['coord'][0,:] * (sphere['coord'][0,:] < 0) \n row1 = row1.reshape(1, len(row1))\n sphere['coord'] = np.concatenate((row1,\n sphere['coord'][1:3,:]))\n else:\n if spherefile is None:\n spherefile = 'lh.sphere'\n # MATLAB RAPPING FOR *sphere FILE READ IN --> has to be changed...\n sphere_mat = eng.SurfStatReadSurf(spherefile)\n sphere = {}\n sphere['tri'] = np.array(sphere_mat['tri'])\n sphere['coord'] = np.array(sphere_mat['coord'])\n \n if v == 327684:\n sphere['tri'] = np.concatenate((sphere['tri'],\n sphere['tri']+v), axis=1)\n col1 = sphere['coord'][0,:] * (sphere['coord'][0,:] < 0)\n col2 = sphere['coord'][0,:] * (sphere['coord'][0,:] > 0)\n x = np.concatenate((col1,col2))\n x = x.reshape(1, len(x))\n row2 = row3 = sphere['coord'][1:3,:]\n y = np.concatenate((row2,row3), axis=1)\n sphere['coord'] = np.concatenate((x,y))\n else:\n if surf['coord'][0,:].mean()/abs(surf['coord'][0,:]).mean() <-0.5:\n row1 = sphere['coord'][0,:] * (sphere['coord'][0,:] < 0)\n row1 = row1.reshape(1, len(row1))\n sphere['coord'] = np.concatenate((row1,\n 
sphere['coord'][1:3,:]))\n else:\n row1 = sphere['coord'][0,:] * (sphere['coord'][0,:] > 0)\n row1 = row1.reshape(1, len(row1))\n sphere['coord'] = np.concatenate((row1,\n sphere['coord'][1:3,:]))\n maxs = surf['coord'].max(1)\n mins = surf['coord'].min(1)\n maxsp = sphere['coord'].max(1)\n minsp = sphere['coord'].min(1)\n surfw = surf\n\n for i in range(0,3): \n surfw['coord'][i,:] = ((sphere['coord'][i,:] - minsp[i]) / \\\n (maxsp[i]-minsp[i]) * (maxs[i]-mins[i]) + mins[i]) * w + \\\n surf['coord'][i,:]*(1-w) \n\n return surfw", "def _compute_topology(self, initial_surfaces):\n\n nsurf = len(initial_surfaces)\n surfaces = numpy.zeros((nsurf, 3, 3, 3), float, 'F')\n\n for isurf in xrange(nsurf):\n surface = initial_surfaces[isurf]\n num_u, num_v = surface.shape[:2]\n mid_u1 = int(numpy.floor((num_u - 1) / 2.0))\n mid_u2 = int(numpy.ceil((num_u - 1) / 2.0))\n mid_v1 = int(numpy.floor((num_v - 1) / 2.0))\n mid_v2 = int(numpy.ceil((num_v - 1) / 2.0))\n\n for ind_u in xrange(2):\n for ind_v in xrange(2):\n surfaces[isurf, -ind_u, -ind_v] = surface[-ind_u, -ind_v]\n\n for ind_u in xrange(2):\n surfaces[isurf, -ind_u, 1] += 0.5 * surface[-ind_u, mid_v1] + \\\n 0.5 * surface[-ind_u, mid_v2]\n\n for ind_v in xrange(2):\n surfaces[isurf, 1, -ind_v] += 0.5 * surface[mid_u1, -ind_v] + \\\n 0.5 * surface[mid_u2, -ind_v]\n\n nvert, nedge, surf_ptrs \\\n = BSElib.computesurfconnectivities(nsurf, 1e-16, 1e-10, surfaces)\n\n edge_ptrs \\\n = BSElib.computeedgeconnectivities(nsurf, nedge, surf_ptrs)\n\n ngroup, surf_group, edge_group \\\n = BSElib.computegroups(nsurf, nedge, surf_ptrs)\n\n topology = [nvert, nedge, ngroup, \\\n surf_ptrs, edge_ptrs, \\\n surf_group, edge_group, \\\n ]\n\n return topology", "def _get_single_direction_neighbors(object_idx, ui_v_dist, ui_h_dist):\n neighbor_dict = {}\n vertical_dist = ui_v_dist[object_idx]\n horizontal_dist = ui_h_dist[object_idx]\n bottom_neighbors = np.array([\n idx for idx in range(len(vertical_dist)) if vertical_dist[idx] > 0 and\n abs(horizontal_dist[idx]) < config.NORM_HORIZONTAL_NEIGHBOR_MARGIN\n ])\n top_neighbors = np.array([\n idx for idx in range(len(vertical_dist)) if vertical_dist[idx] < 0 and\n abs(horizontal_dist[idx]) < config.NORM_HORIZONTAL_NEIGHBOR_MARGIN\n ])\n right_neighbors = np.array([\n idx for idx in range(len(horizontal_dist)) if horizontal_dist[idx] > 0 and\n abs(vertical_dist[idx]) < config.NORM_VERTICAL_NEIGHBOR_MARGIN\n ])\n left_neighbors = np.array([\n idx for idx in range(len(horizontal_dist)) if horizontal_dist[idx] < 0 and\n abs(vertical_dist[idx]) < config.NORM_VERTICAL_NEIGHBOR_MARGIN\n ])\n\n if bottom_neighbors.size:\n neighbor_dict[NeighborContextDesc.TOP] = bottom_neighbors[np.argmin(\n vertical_dist[bottom_neighbors])]\n if top_neighbors.size:\n neighbor_dict[NeighborContextDesc.BOTTOM] = top_neighbors[np.argmax(\n vertical_dist[top_neighbors])]\n if right_neighbors.size:\n neighbor_dict[NeighborContextDesc.LEFT] = right_neighbors[np.argmin(\n horizontal_dist[right_neighbors])]\n if left_neighbors.size:\n neighbor_dict[NeighborContextDesc.RIGHT] = left_neighbors[np.argmax(\n horizontal_dist[left_neighbors])]\n\n return neighbor_dict", "def regular_neighborhood(self):\n euler_char = self.num_switches() - self.num_branches()\n return Surface(num_punctures=self.num_complementary_regions(),\n euler_char=euler_char)", "def __init__(self, n_pixels_u, n_pixels_v, detector_size_u, detector_size_v, source_to_detector_dist,\n source_to_object_dist, angular_inc=1, center_of_rot=0, **kwargs):\n\n self.n_pixels_u = n_pixels_u\n 
self.n_pixels_v = n_pixels_v\n\n self.detector_size_u = detector_size_u\n self.detector_size_v = detector_size_v\n self.source_to_detector_dist = source_to_detector_dist\n self.source_to_object_dist = source_to_object_dist\n self.angular_inc = angular_inc\n\n self.center_of_rot_u = center_of_rot\n\n # All values below are calculated\n\n self.projection_angs = np.arange(0., 360, self.angular_inc)\n self.n_projections = len(self.projection_angs)\n\n self.object_size_x = self.detector_size_u * self.source_to_object_dist / self.source_to_detector_dist\n self.object_size_y = self.detector_size_u * self.source_to_object_dist / self.source_to_detector_dist\n self.object_size_z = self.detector_size_v * self.source_to_object_dist / self.source_to_detector_dist\n\n self.voxel_size_x = self.object_size_x / self.n_pixels_u\n self.voxel_size_y = self.object_size_y / self.n_pixels_u\n self.voxel_size_z = self.object_size_z / self.n_pixels_v\n\n self.pixel_size_u = self.detector_size_u / self.n_pixels_u\n self.pixel_size_v = self.detector_size_v / self.n_pixels_v\n\n self.center_of_rot_y = self.center_of_rot_u * (\n self.source_to_object_dist / self.source_to_detector_dist) * self.pixel_size_u\n\n self.object_ys = (np.arange(self.n_pixels_u, dtype=np.float64) - self.n_pixels_u / 2.) * self.voxel_size_y\n self.object_xs = (np.arange(self.n_pixels_u, dtype=np.float64) - self.n_pixels_u / 2.) * self.voxel_size_x\n self.object_zs = (np.arange(self.n_pixels_v, dtype=np.float64) - self.n_pixels_v / 2.) * self.voxel_size_z\n\n self.detector_us = (np.arange(self.n_pixels_u,\n dtype=np.float64) - self.n_pixels_u / 2.) * self.pixel_size_u\n self.detector_vs = (np.arange(self.n_pixels_v,\n dtype=np.float64) - self.n_pixels_v / 2.) * self.pixel_size_v", "def implicit_surface(self, F, y):\n y = y[:, :, None].expand(-1, -1, self.n_primitives, -1)\n y_latent, ldj = self.invertible_network.inverse(F, y)\n norm = torch.sqrt((y_latent**2).sum(-1))\n\n # <0 denotes internal points\n # >0 denotes external points\n # 0 is the boundary hence our primitive\n return norm - self.radius, ldj", "def buildSurface(self, idx = 0, surface = 1, z = 1, verbose = 1,\\\n strained = False, vacuum = 0, ab = False):\n\n \"\"\"Basis for the selected surface, and the cell repetitions\"\"\"\n rep = np.zeros((3, 4))\n new_base = np.zeros((3, 3))\n\n altBase1 = np.zeros((3, 3))\n altBase2 = np.zeros((3, 3))\n if ab:\n B1, B2 = self.getAB()\n\n if surface == 1:\n old_base = self.base_1\n spec = self.spec_1\n mass = self.mass_1\n pos = self.pos_1\n\n new_base[2, 2] = self.base_1[2, 2] * z\n new_base[0:2, 0:2] = self.cell_1[idx, :, :]\n \n if ab:\n altBase1[:2, :2] = np.matmul(B1[:2, :2], self.rep_1[idx, :, :])\n altBase1[2, 2] = B1[2, 2] * z\n \n rep[0:2, 0:2] = self.rep_1[idx, :, :]\n rep[:, 2] = np.sum(rep, axis = 1)\n elif surface == 2:\n old_base = self.base_2\n spec = self.spec_2\n mass = self.mass_2\n pos = self.pos_2\n\n new_base[2, 2] = self.base_2[2, 2] * z\n new_base[0:2, 0:2] = self.cell_2[idx, :, :]\n \n if ab:\n altBase2[:2, :2] = np.matmul(B2[:2, :2], self.rep_2[idx, :, :])\n altBase2[2, 2] = B2[2, 2] * z\n\n rep[0:2, 0:2] = self.rep_2[idx, :, :]\n rep[:, 2] = np.sum(rep, axis = 1)\n\n \"\"\"Set all hi-lo limits for the cell repetitions\"\"\"\n h = 2\n rep = [rep[0, :].min() - h, rep[0, :].max() + h,\\\n rep[1, :].min() - h, rep[1, :].max() + h,\\\n 0, z - 1]\n\n \"\"\"Extend the cell as spcefied\"\"\"\n pos_ext, spec_ext, mass_ext = ut.extendCell(base = old_base, rep = rep,\\\n pos = pos.T, spec = spec,\\\n mass = mass)\n\n if 
surface == 1:\n \"\"\"Change to alternative base_1 if specified\"\"\"\n if ab:\n pos_d = np.matmul(np.linalg.inv(new_base), pos_ext)\n new_base = altBase1\n pos_ext = np.matmul(new_base, pos_d)\n if verbose > 0:\n string = \"Surface 1 constructed with alternative base\"\n ut.infoPrint(string)\n\n elif surface == 2:\n \"\"\"Initial rotation\"\"\"\n initRot = np.deg2rad(self.ang[idx])\n\n \"\"\"Rotate the positions pos_rot = R*pos\"\"\"\n pos_ext = ut.rotate(pos_ext, initRot, verbose = verbose - 1)\n\n \"\"\"Construct it in the alternative base_2 if specified\"\"\"\n if ab:\n pos_d = np.matmul(np.linalg.inv(new_base), pos_ext)\n new_base = altBase2\n pos_ext = np.matmul(new_base, pos_d)\n if verbose > 0:\n string = \"Surface 2 constructed with alternative base\"\n ut.infoPrint(string)\n\n \"\"\"Strain the cell if specified\"\"\"\n if strained:\n\n \"\"\"If the cell is to be strained then convert to direct coordinates\"\"\"\n pos_d = np.matmul(np.linalg.inv(new_base), pos_ext)\n\n \"\"\"Convert back to cartesian coordinates.\"\"\"\n if ab:\n \"\"\"Strain it to the alternative base_1\"\"\"\n new_base[0:2, 0:2] = np.matmul(B1[:2, :2], self.rep_1[idx, :, :])\n string = \"Surface 2 strained to match surface 1 with an alternative base\"\n \n else:\n \"\"\"Strain it to the bottom surface\"\"\"\n new_base[0:2, 0:2] = self.cell_1[idx, :, :].copy()\n string = \"Surface 2 strained to match surface 1\"\n\n pos_ext = np.matmul(new_base, pos_d)\n\n if verbose > 0:\n ut.infoPrint(string)\n\n \"\"\"Convert the entire new cell to direct coordinates\"\"\"\n pos_d = np.matmul(np.linalg.inv(new_base), pos_ext)\n\n \"\"\"Remove all positions outside [0, 1)\"\"\"\n pos_d = np.round(pos_d, 8)\n new_base = np.round(new_base, 8)\n\n keep = np.all(pos_d < 1, axis = 0) * np.all(pos_d >= 0, axis = 0)\n pos_d = pos_d[:, keep]\n species = spec_ext[keep]\n mass = mass_ext[keep]\n\n \"\"\"Return to cartesian coordinates and change shape to (...,3)\"\"\"\n pos = np.matmul(new_base, pos_d).T\n\n \"\"\"Add vacuum if specified\"\"\"\n new_base[2, 2] = new_base[2, 2] + vacuum\n if verbose: \n string = \"Vacuum added: %.2f\"\\\n % (vacuum)\n ut.infoPrint(string)\n\n return new_base, pos, species, mass", "def borehole_plane_intersection(self):\n\n # 1. Step: Compute direction vectors to each borehole ==========================================================\n borehole_data = self.borehole_geometry.copy()\n borehole_data[\"depth\"] = 0\n borehole_to_global_coords(\n data=borehole_data,\n x=\"x\",\n y=\"y\",\n z=\"z\",\n depth=\"depth\",\n upward_gradient=\"upward_gradient\",\n azimuth=\"azimuth\",\n )\n\n # Extract relevant columns from borehole data\n _mask = [\"borehole\", \"x_gts\", \"y_gts\", \"z_gts\", \"_trig_x\", \"_trig_y\", \"_trig_z\"]\n bh_data = borehole_data[_mask]\n\n mapper = {\n \"x_gts\": \"x_bh\",\n \"y_gts\": \"y_bh\",\n \"z_gts\": \"z_bh\",\n \"_trig_x\": \"r_x\",\n \"_trig_y\": \"r_y\",\n \"_trig_z\": \"r_z\",\n }\n bh_data = bh_data.rename(columns=mapper)\n\n # 2. Step: Calculate shear-zone unit normals and centroids =====================================================\n sz = self.planes()\n\n # 3. Step: Extract shear-zone borehole geometry ================================================================\n # i.e. only the shear-zones used for computing shear-zone planes.\n sz_bh = self.shearzone_borehole_geometry.copy()\n sz_bh = sz_bh[sz_bh.depth.notna()]\n sz_bh = sz_bh.rename(columns={\"depth\": \"old_depth\"})\n\n # 4. 
Step: Merge the collected data ============================================================================\n df = sz.merge(sz_bh, on=\"shearzone\").merge(bh_data, on=\"borehole\")\n\n # 5. Step: Calculate new shear-zone borehole intersections. ====================================================\n # Quantities\n n_vec = [\"n_x\", \"n_y\", \"n_z\"]\n r_vec = [\"r_x\", \"r_y\", \"r_z\"]\n bh_coords = [\"x_bh\", \"y_bh\", \"z_bh\"]\n sz_coords = [\"x_c\", \"y_c\", \"z_c\"]\n\n # Depth calculation\n df[\"depth\"] = (\n (df[sz_coords].values - df[bh_coords].values) * df[n_vec].values\n ).sum(axis=1) / (df[n_vec].values * df[r_vec].values).sum(axis=1)\n\n # Calculate global coordinates\n df.loc[:, \"x_sz\"] = df.x_bh + (df.depth * df.r_x)\n df.loc[:, \"y_sz\"] = df.y_bh + (df.depth * df.r_y)\n df.loc[:, \"z_sz\"] = df.z_bh + (df.depth * df.r_z)\n\n return df", "def nearest_sphere_surface(x_input, y_input, z_input):\n\n vm = math.sqrt(sum([x_input**2, y_input**2, z_input**2]))\n return (x_input/vm, y_input/vm, z_input/vm)", "def _get_surface_color_scalars(self, mol, solvent_radius, surface_points, smooth_input):\n grid = FutamuraHash(mol)\n T = grid.T\n radii = {'C':1.75,\n 'O':1.4,\n 'N':1.55,\n 'S':1.8,\n 'P':2.0,\n 'H':1.17,\n 'Z':3.0}\n default_distance = 1.8\n print 'locating nearest atoms'\n scalars = vtk.vtkIntArray()\n scalars.SetNumberOfComponents(1)\n # now locate the intersections\n number_index_map = {}\n for ind in range(len(mol.atoms)):\n number_index_map[mol.atoms[ind].atom_number] = ind\n \n last_atom = 'None'\n if smooth_input:\n new_points = []\n ptctr = 0\n for point in surface_points:\n x_val = y_val = z_val = 0\n # figure out which bin it goes in\n for x_ind in range(0, grid.volume_count_x):\n if point[0] < grid.volume_indices_x[x_ind]:\n break\n else:\n x_val = x_ind\n for y_ind in range(grid.volume_count_y):\n if point[1] < grid.volume_indices_y[y_ind]:\n break\n else:\n y_val = y_ind\n for z_ind in range(grid.volume_count_z):\n if point[2] < grid.volume_indices_z[z_ind]:\n break\n else:\n z_val = z_ind\n\n start_array = [0,0,0]\n end_array = [0,0,0]\n # figure out starts and ends\n counts = [grid.volume_count_x, grid.volume_count_y, grid.volume_count_z]\n keys = [x_val, y_val, z_val]\n for ind in [0,1,2]:\n if keys[ind] == 0:\n start_array[ind] = 0\n end_array[ind] = 2\n elif keys[ind] == counts[ind] - 1:\n start_array[ind] = keys[ind]-1\n end_array[ind] = keys[ind]+1\n else:\n start_array[ind] = keys[ind]-1\n end_array[ind] = keys[ind]+2\n min_dist = 1000.0\n sec_dist = 1000.0\n id2 = -1\n id = -1\n escape = 0 # turns 1 once the correct atom is found\n if smooth_input == 0:\n identification_distance = 0.1\n # figure out if its in range of the last atom chosen (arbitrary, but tends to speed up the calculations)\n if last_atom != 'None':\n dist = math.sqrt(pow(point[0]-last_atom.x,2) + pow(point[1]-last_atom.y,2) + pow(point[2]-last_atom.z,2))\n dif = abs(dist - radii.get(last_atom.atom_type[0], default_distance))\n if dif < identification_distance:\n id = last_atom.atom_number # assume this is it\n escape = 1\n \n if not escape:\n # now look for atoms in the same bin as the last atom\n ky = '%s %s %s'%(x_val,y_val,z_val)\n if ky in T.keys(): # first look in this atoms bin\n for atom in T[ky]:\n # do not retrieve if type H and protonation is turned off\n if self.hydrogens_on or ((not self.hydrogens_on) and atom.atom_type[0] != 'H'):\n dist = math.sqrt(pow(point[0]-atom.x,2) + pow(point[1]-atom.y,2) + pow(point[2]-atom.z,2))\n if abs(dist - radii.get(atom.atom_type[0], 
default_distance)) < identification_distance:\n id = atom.atom_number # assume this is it\n escape = 1\n break\n if not escape:\n for i in range(start_array[0], end_array[0]):\n for j in range(start_array[1], end_array[1]):\n for k in range(start_array[2], end_array[2]):\n key2 = '%s %s %s'%(i,j,k)\n #if key2 != ky:\n if key2 in T.keys():\n for atom in T[key2]:\n if self.hydrogens_on or ((not self.hydrogens_on) and atom.atom_type[0] != 'H'):\n dist = math.sqrt(pow(point[0]-atom.x,2) + pow(point[1]-atom.y,2) + pow(point[2]-atom.z,2))\n if not smooth_input:\n if abs(dist - radii.get(atom.atom_type[0], default_distance)) < identification_distance:\n id = atom.atom_number\n escape = 1\n break\n elif dist < min_dist:\n min_dist = dist\n id = atom.atom_number\n else:\n if dist < min_dist:\n sec_dist = min_dist\n id2 = id\n min_dist = dist\n id = atom.atom_number\n if escape:\n break\n if escape:\n break\n if escape:\n break\n # assign the index\n last_atom = mol.atoms[number_index_map[id]]\n scalars.InsertTuple1(ptctr, number_index_map[id])\n # smooth the data\n fitting_back_distance = 0.2\n if smooth_input:\n x2 = point[0]\n y2 = point[1]\n z2 = point[2]\n if id2 != -1: # more than one intersection is necessary\n sec_last_atom = mol.atoms[number_index_map[id2]]\n if abs(min_dist-radii.get(last_atom.atom_type[0], default_distance)) < fitting_back_distance: # if this atom is close enough\n if abs(sec_dist-radii.get(sec_last_atom.atom_type[0], default_distance)) > 0.4: # if second atom is far enough away\n r = radii.get(last_atom.atom_type[0], default_distance)\n d = min_dist\n x = last_atom.x\n y = last_atom.y\n z = last_atom.z\n x2 = ((r/d)*(point[0]-x)) + x\n y2 = ((r/d)*(point[1]-y)) + y\n z2 = ((r/d)*(point[2]-z)) + z\n new_points.append([x2,y2,z2])\n \n ptctr += 1\n if smooth_input:\n return scalars,new_points\n else:\n return scalars", "def __init__(self, radius, dimensions, index=N_HPFS):\n _OpticalElement.__init__(self, 'itop mirror', index)\n self.add_boundary(SphericalSurface(\n [0., 0., radius - dimensions[2]], float(radius), name='-z',\n reflective=True))\n self.add_boundary(PlaneSurface(\n [0., 0., 1.], [0., 0., 0.], name='+z'))\n self.add_boundary(PlaneSurface(\n [0., 1., 0.], [0., dimensions[1]/2., 0.], name='+y'))\n self.add_boundary(PlaneSurface(\n [0., -1., 0.], [0., -dimensions[1]/2., 0.], name='-y'))\n self.add_boundary(PlaneSurface(\n [1., 0., 0.], [dimensions[0]/2., 0., 0.], name='+x'))\n self.add_boundary(PlaneSurface(\n [-1., 0., 0.], [-dimensions[0]/2., 0., 0.], name='-x'))", "def corner_to_train(corners, sphere_center, resolution=0.50, x=(0, 90), y=(-50, 50), z=(-4.5, 5.5), scale=4, min_value=np.array([0., -50., -4.5])):\n x_logical = np.logical_and((corners[:, :, 0] < x[1]), (corners[:, :, 0] >= x[0]))\n y_logical = np.logical_and((corners[:, :, 1] < y[1]), (corners[:, :, 1] >= y[0]))\n z_logical = np.logical_and((corners[:, :, 2] < z[1]), (corners[:, :, 2] >= z[0]))\n xyz_logical = np.logical_and(x_logical, np.logical_and(y_logical, z_logical)).all(axis=1)\n train_corners = corners[xyz_logical].copy()\n sphere_center = sphere_to_center(sphere_center, resolution=resolution, scale=scale, min_value=min_value) #sphere to center\n for index, (corner, center) in enumerate(zip(corners[xyz_logical], sphere_center)):\n train_corners[index] = corner - center\n return train_corners", "def createSurfaceGeo(self):\n self.surfGeo = dict()\n r = self.geoParam['CylinderLightGuideRadius']\n self.surfGeo[r] = 'LightGuide'\n #self.material = 
{'Moderator':None,'Detector':None,'LightGuide':None}\n while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):\n r += self.geoParam['DetectorThickness']\n self.surfGeo[r] = 'Detector'\n r += self.geoParam['DetectorSpacing']\n if (r < self.geoParam['CylinderRadius']):\n self.surfGeo[r] = 'LightGuide'\n return self.surfGeo", "def build_halo_mask(fixed_depth=30, margin=21, min_fragment=10):\n assert margin % 2 is not 0, \"Margin should be odd\"\n\n rr, cc = circle(margin / 2, margin / 2, margin / 2 + 1, shape=(margin, margin))\n structure_element = numpy.zeros((margin, margin))\n structure_element[rr, cc] = 1\n structure_element = numpy.repeat(numpy.expand_dims(numpy.expand_dims(structure_element, 0), 0), fixed_depth, 0)\n\n sel = torch.from_numpy(structure_element).float().to(device)\n\n def f(label):\n \"\"\"\n \n :param label: batch of instance levels each instance must have unique id\n :return: labels, masks and object_lists used by halo loss\n \"\"\"\n back = numpy.zeros((label.shape[0], fixed_depth, label.shape[1], label.shape[2]))\n object_list = []\n for i in range(label.shape[0]):\n bincount = numpy.bincount(label[i].flatten())\n pixels = numpy.where(bincount > min_fragment)[0]\n if len(pixels) > fixed_depth:\n pixels = pixels[:fixed_depth]\n warnings.warn(\"Not all objects fits in fixed depth\", RuntimeWarning)\n\n for l, v in enumerate(pixels):\n back[i, l, label[i] == v] = 1.\n object_list.append(numpy.array(range(l + 1)))\n\n labels = torch.from_numpy(back).float().to(device)\n masks = F.conv2d(labels, sel, groups=fixed_depth, padding=margin / 2)\n \n masks[masks > 0] = 1.\n masks[labels > 0] = 2.\n masks[:, 0, :, :] = 1.\n \n weights=masks.sum(-1,keepdim=True).sum(-2,keepdim=True)\n weights[weights==0.]=1.\n \n masks = masks/weights\n \n return labels, masks, object_list\n\n return f", "def wireframe(self, projection_type, canvas_dimensions):\n # Configure viewportself.screen_dimensions = {\n self.screen_dimensions = {\n \"width\": canvas_dimensions['width'],\n \"height\": canvas_dimensions['height']\n }\n\n self.projection.viewport = self.screen_dimensions\n self.projection.projection_type = projection_type\n self.projection.camera = self.cameras[0]\n self.projection.region_width = self.screen_dimensions.get('width')\n self.projection.region_height = self.screen_dimensions.get('height')\n\n # Draw polygons for each object\n projected_objects = []\n for obj in self.objects:\n print('Rendering: ', obj)\n\n world_transformation = obj.translate(\n obj.rotate(obj.scale(obj.vertices))\n )\n camera_transformation = obj.rotate(\n obj.translate(world_transformation, np.array(\n [\n -self.projection.camera.translation[0],\n -self.projection.camera.translation[1],\n -self.projection.camera.translation[2]\n ]\n )), np.array(\n [\n -self.projection.camera.rotation[0],\n -self.projection.camera.rotation[1],\n -self.projection.camera.rotation[2]\n ]\n \n )\n )\n projected_view = self.projection.project_all(camera_transformation)\n normalized_view = obj.normalize(\n projected_view, self.projection.viewport\n )\n projected_faces = []\n for face in obj.faces:\n poly = []\n for vertex_index in face:\n poly.append(\n [\n int(normalized_view[vertex_index][0]),\n int(normalized_view[vertex_index][1]),\n int(camera_transformation[vertex_index][2])\n ]\n )\n projected_faces.append(poly)\n center = list(obj.calculate_center(normalized_view))\n vertices = [ [int(p[0]), int(p[1]), int(p[2])] for p in normalized_view]\n # print('calculated_center: ', center)\n # 
print(''vertices)\n projected_objects.append({\n 'vertices': vertices,\n 'faces': obj.clip(self.projection.camera.translation, projected_faces),\n 'center': [ int(coord) for coord in obj.calculate_center(normalized_view) ],\n })\n print(projected_objects[0]['faces'][:20])\n return projected_objects", "def auto_rivet():\n sel_list = pm.ls(sl=1)\n\n # the last selection is the mesh\n objects = sel_list[:-1]\n geo = sel_list[-1]\n\n # get the closest point to the surface\n geo_shape = geo.getShape()\n\n follicles = []\n\n for obj in objects:\n # pivot point of the obj\n pivot = obj.getRotatePivot(space='world')\n uv = geo_shape.getUVAtPoint(pivot, space='world')\n\n # create a hair follicle\n follicle = pm.nt.Follicle()\n follicles.append(follicle)\n follicle.simulationMethod.set(0)\n geo_shape.worldMatrix >> follicle.inputWorldMatrix\n geo_shape.outMesh >> follicle.inputMesh\n follicle.parameterU.set(uv[0])\n follicle.parameterV.set(uv[1])\n\n # parent the object to the follicles transform node\n follicle_transform = follicle.getParent()\n\n follicle.outTranslate >> follicle_transform.translate\n follicle.outRotate >> follicle_transform.rotate\n\n pm.parent(obj, follicle_transform)\n\n return follicles", "def drawIsoSurfaces0( self ):\n #research\n profbox()\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n v= vtk.vtkAppendPolyData()\n \n for modelNode in modelNodes.values():\n if modelNode.GetAttribute(\"nth\")!=None and modelNode.GetDisplayVisibility()==1 :\n v.AddInput(modelNode.GetPolyData())\n \n modeller = vtk.vtkImplicitModeller()\n modeller.SetInput(v.GetOutput())\n modeller.SetSampleDimensions(self.dim.value,self.dim.value,self.dim.value)\n modeller.SetCapping(0)\n modeller.SetAdjustBounds(self.abonds.value)\n modeller.SetProcessModeToPerVoxel() \n modeller.SetAdjustDistance(self.adist.value/100)\n modeller.SetMaximumDistance(self.maxdist.value/100) \n \n contourFilter = vtk.vtkContourFilter()\n contourFilter.SetNumberOfContours(self.nb.value)\n contourFilter.SetInputConnection(modeller.GetOutputPort()) \n contourFilter.ComputeNormalsOn()\n contourFilter.ComputeScalarsOn()\n contourFilter.UseScalarTreeOn()\n contourFilter.SetValue(self.contour.value,self.contourValue.value)\n contourFilter.SetValue(self.contour2.value,self.contourValue2.value)\n contourFilter.SetValue(self.contour3.value,self.contourValue3.value)\n contourFilter.SetValue(self.contour4.value,self.contourValue4.value)\n contourFilter.SetValue(self.contour5.value,self.contourValue5.value)\n\n isoSurface = contourFilter.GetOutput()\n self.AddContour(isoSurface)", "def alignMonoPlane(entry,prec=1E-4,seed_index=0,supercell=2,\n c_mag=50,dist_from_plane=3):\n\n\n # Keep original copy of structure\n s = copy.deepcopy(entry[0])\n\n\n new_latt,fit_fracs= getNewLattice(entry,dim=2,prec=prec,seed_index=seed_index,\n supercell=supercell,c_mag=c_mag)\n\n\n \n\n # Identify plane to translate atoms towards\n\n plane = Plane(Point3D(s.sites[seed_index].coords),\n normal_vector=new_latt[2])\n \n # Create list of translationss\n trans = list(itertools.product([1,-1,0],repeat=3))\n\n lat = np.array(s.lattice.as_dict()['matrix'])\n final_sites = []\n i=0\n \n # Ensure that the atoms are nearby each other\n for site in [x.coords for x in s.sites]:\n point = Point3D(site)\n if 1==1:\n\n news = []\n \n # translate atomic sites to see which position is closest to plane\n for t in trans:\n point = Point3D(site+np.dot(np.transpose(lat),t))\n news.append([float(plane.distance(point)),t])\n news.sort(key = lambda x:x[0])\n for new in 
news:\n if not np.any([magni((site+np.dot(np.transpose(lat),new[1]))-x)<=prec for x in final_sites]):\n final_sites.append(site+\n np.dot(np.transpose(lat),new[1]))\n break\n i+=1\n \n # Create new lattice matricies\n lat1 = np.array([new_latt[0],new_latt[1],new_latt[2]])\n lat2 = np.array([new_latt[1],new_latt[0],new_latt[2]])\n\n # Generate atomic fractions\n new_fracs1 = np.linalg.solve(lat1.T,np.array(final_sites).T).T\n new_fracs2 = np.linalg.solve(lat2.T,np.array(final_sites).T).T\n\n species=fit_fracs[1]\n\n return([species,new_fracs1,lat1],[species,new_fracs2,lat2])", "def Sphere_Bezier(refinements=0):\n \n S = np.array([0,0,0])\n T = np.array([0,0,1])\n V = np.array([0,0,0,0.5,0.5,1,1,1])\n q = 2\n m = 4\n Pj = np.array([[0,0,1],[1,0,1],[1,0,0],[1,0,-1],[0,0,-1]])\n wj = np.array([1,1/np.sqrt(2),1,1/np.sqrt(2),1])\n\n sphere = Bezier.MakeRevolvedSurface(S,T,2*np.pi,q,V,m,Pj,wj)\n \n # Make knot refinements before decomposing\n if refinements:\n uniqueU = np.unique(sphere.U)\n for i in xrange(uniqueU.size-1):\n XI = np.linspace(uniqueU[i],uniqueU[i+1],refinements+2)[1:-1]\n sphere.refineknotvector(XI,'U')\n uniqueV = np.unique(sphere.V)\n for i in xrange(uniqueV.size-1):\n XI = np.linspace(uniqueV[i],uniqueV[i+1],refinements+2)[1:-1]\n sphere.refineknotvector(XI,'V')\n \n sphere=sphere.decompose()\n\n element_list = [Bezier.BezierElement(sphere[i,:,:,:]) for i in xrange(sphere.shape[0])]\n \n \n sphere_domain = Bezier.Domain(element_list)\n mesh = Bezier.Mesh([sphere_domain])\n \n mesh.dList[0].edges = mesh.numElements * 2\n mesh.dList[0].corners = 8\n mesh.dList[0].extraordinary_points = 8\n\n \n return mesh", "def voronoi_sub_mask_1d_index_to_pixeliztion_1d_index_from_grids_and_geometry(\n grid,\n mask_1d_index_to_nearest_pixelization_1d_index,\n sub_mask_1d_index_to_mask_1d_index,\n pixel_centres,\n pixel_neighbors,\n pixel_neighbors_size,\n):\n\n sub_mask_1d_index_to_pixeliztion_1d_index = np.zeros((grid.shape[0]))\n\n for sub_mask_1d_index in range(grid.shape[0]):\n\n nearest_pixelization_1d_index = mask_1d_index_to_nearest_pixelization_1d_index[\n sub_mask_1d_index_to_mask_1d_index[sub_mask_1d_index]\n ]\n\n while True:\n\n nearest_pixelization_pixel_center = pixel_centres[\n nearest_pixelization_1d_index\n ]\n\n sub_pixel_to_nearest_pixelization_distance = (\n (grid[sub_mask_1d_index, 0] - nearest_pixelization_pixel_center[0]) ** 2\n + (grid[sub_mask_1d_index, 1] - nearest_pixelization_pixel_center[1])\n ** 2\n )\n\n closest_separation_from_pixelization_to_neighbor = 1.0e8\n\n for neighbor_pixelization_1d_index in range(\n pixel_neighbors_size[nearest_pixelization_1d_index]\n ):\n\n neighbor = pixel_neighbors[\n nearest_pixelization_1d_index, neighbor_pixelization_1d_index\n ]\n\n separation_from_neighbor = (\n grid[sub_mask_1d_index, 0] - pixel_centres[neighbor, 0]\n ) ** 2 + (grid[sub_mask_1d_index, 1] - pixel_centres[neighbor, 1]) ** 2\n\n if (\n separation_from_neighbor\n < closest_separation_from_pixelization_to_neighbor\n ):\n closest_separation_from_pixelization_to_neighbor = (\n separation_from_neighbor\n )\n closest_neighbor_pixelization_1d_index = (\n neighbor_pixelization_1d_index\n )\n\n neighboring_pixelization_1d_index = pixel_neighbors[\n nearest_pixelization_1d_index, closest_neighbor_pixelization_1d_index\n ]\n sub_pixel_to_neighboring_pixelization_distance = (\n closest_separation_from_pixelization_to_neighbor\n )\n\n if (\n sub_pixel_to_nearest_pixelization_distance\n <= sub_pixel_to_neighboring_pixelization_distance\n ):\n 
sub_mask_1d_index_to_pixeliztion_1d_index[\n sub_mask_1d_index\n ] = nearest_pixelization_1d_index\n break\n else:\n nearest_pixelization_1d_index = neighboring_pixelization_1d_index\n\n return sub_mask_1d_index_to_pixeliztion_1d_index", "def apply(self, mode='lateral'):\n num_lat_slices = self.img3d.shape[0]\n num_cor_slices = self.img3d.shape[2]\n bin_mask = np.zeros(self.mask3d.shape)\n x,y,z = np.where(self.mask3d==self.vertebra_id)\n bin_mask[np.min(x):np.max(x), np.min(y):np.max(y), np.min(z):np.max(z)] = 1\n if mode=='lateral' or mode=='fuse':\n mask_lat = np.zeros((6, self.mask3d.shape[0], self.mask3d.shape[1], self.mask3d.shape[2]))\n img_lat = np.zeros(self.img3d.shape)\n binary_lat = np.zeros(self.mask3d.shape)\n # for each lateral slice\n for idx in range(num_lat_slices):\n img_slice, mask_slice = np.copy(self.img3d[idx, :, :]), np.copy(self.mask3d[idx, :, :])\n xloc, yloc = np.where(mask_slice==self.vertebra_id)\n # check if vertebra is present in image\n if xloc.shape[0]==0:\n # if not keep mask as it is\n mask_lat[:,idx, :, :] = self.get_one_hot(mask_slice)\n img_lat[idx, :, :] = img_slice\n else:\n min_x, max_x = np.min(xloc), np.max(xloc)\n min_y, max_y = np.min(yloc), np.max(yloc)\n inpainted_img, inpainted_mask, binary_mask = self.inpaint(img_slice, mask_slice, min_x, max_x, min_y, max_y)\n mask_lat[:,idx, :, :] = inpainted_mask\n img_lat[idx,:, :] = inpainted_img\n binary_lat[idx,:,:] = binary_mask\n\n\n if mode=='coronal' or mode=='fuse':\n mask_cor = np.zeros((6, self.mask3d.shape[0], self.mask3d.shape[1], self.mask3d.shape[2]))\n img_cor = np.zeros(self.img3d.shape)\n binary_cor = np.zeros(self.mask3d.shape)\n # for each coronal slice\n for idx in range(num_cor_slices):\n img_slice, mask_slice = np.copy(self.img3d[:, :, idx]), np.copy(self.mask3d[:, :, idx])\n xloc, yloc = np.where(mask_slice==self.vertebra_id)\n # check if vertebra is present in image\n if xloc.shape[0]==0:\n # if not keep mask as it is\n mask_cor[:, :, :, idx] = self.get_one_hot(mask_slice)\n img_cor[:, :, idx] = img_slice\n else:\n min_x, max_x = np.min(xloc), np.max(xloc)\n min_y, max_y = np.min(yloc), np.max(yloc)\n # else remove fractured vertebra and inpaint\n inpainted_img, inpainted_mask, binary_mask = self.inpaint(img_slice, mask_slice, min_x, max_x, min_y, max_y, 'coronal')\n mask_cor[:, :, :, idx] = inpainted_mask\n img_cor[:, :, idx] = inpainted_img\n binary_cor[:,:,idx] = binary_mask\n \n # return to a one channel mask and convert labels back\n if mode=='lateral':\n mask_lat = np.argmax(mask_lat, axis=0)\n mask_lat = self.map_class_to_vert(mask_lat)\n self.mask3d = mask_lat\n self.img3d = img_lat\n elif mode=='coronal':\n mask_cor = np.argmax(mask_cor, axis=0)\n mask_cor = self.map_class_to_vert(mask_cor)\n self.mask3d = mask_cor\n self.img3d = img_cor\n elif mode=='fuse':\n mask_fuse = mask_cor*0.5+mask_lat*0.5\n mask_fuse = np.argmax(mask_fuse, axis=0)\n mask_fuse = self.map_class_to_vert(mask_fuse)\n self.mask3d = mask_fuse\n self.img3d = (img_lat+img_cor)/2\n \n # save result\n self.mask3d = self.mask3d.astype(np.uint8)\n self.img3d = self.img3d.astype(np.float32)\n \n # put back if we padded and cropped\n if self.padz and self.padx:\n self.orig_img3d[:,self.ymin:self.ymax, :] = self.img3d[self.xcrop1:-self.xcrop2,:,self.zcrop1:-self.zcrop2]\n self.orig_mask3d[:,self.ymin:self.ymax, :] = self.mask3d[self.xcrop1:-self.xcrop2,:,self.zcrop1:-self.zcrop2]\n elif self.padz and not self.padx:\n self.orig_img3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, :] = 
self.img3d[:,:,self.zcrop1:-self.zcrop2]\n self.orig_mask3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, :] = self.mask3d[:,:,self.zcrop1:-self.zcrop2]\n elif not self.padz and self.padx:\n self.orig_img3d[:,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = self.img3d[self.xcrop1:-self.xcrop2,:,:]\n self.orig_mask3d[:,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = self.mask3d[self.xcrop1:-self.xcrop2,:,:]\n else:\n self.orig_img3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = self.img3d\n self.orig_mask3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = self.mask3d\n \n img = return_scan_to_orig(self.orig_img3d, self.mask_affine, self.mask_header, self.zooms)\n nib.save(img, self.inpainted_img_path)\n\n mask_fuse = return_scan_to_orig(self.orig_mask3d, self.mask_affine, self.mask_header, self.zooms, np.uint8)\n nib.save(mask_fuse, self.inpainted_mask_path)\n print('Inpaint mask and image saved at: ', self.inpainted_mask_path, self.inpainted_img_path)", "def populateCenters(matrix, row, col, frame, midRange, roughness, perturbance):\n maxIndex = matrix.shape[0]-1\n quarterRange = midRange/2\n\n pf = perturbanceFactor(matrix.shape[0], midRange, perturbance)\n noiseLevel = roughness * pf\n\n \"\"\"\n For each subdivided cube, getIndexRef is used to get the indicies, and center is used\n to determine the points that should be averaged and the point to be set. \n setValue does the calculations.\n \"\"\"\n indexRef = getIndexRef(row, col, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col + midRange, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col + midRange, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col + midRange, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col + midRange, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n\n #printAllowCancel(matrix)", "def drawIsoSurfaces0(self):\r\n # research\r\n profbox()\r\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\r\n v = vtk.vtkAppendPolyData()\r\n\r\n for modelNode in modelNodes.values():\r\n if modelNode.GetAttribute(\"nth\") != None and modelNode.GetDisplayVisibility() == 1 :\r\n v.AddInput(modelNode.GetPolyData())\r\n\r\n modeller = vtk.vtkImplicitModeller()\r\n modeller.SetInput(v.GetOutput())\r\n modeller.SetSampleDimensions(self.dim.value, self.dim.value, self.dim.value)\r\n modeller.SetCapping(0)\r\n modeller.SetAdjustBounds(self.abonds.value)\r\n modeller.SetProcessModeToPerVoxel()\r\n modeller.SetAdjustDistance(self.adist.value / 100)\r\n modeller.SetMaximumDistance(self.maxdist.value / 100)\r\n\r\n contourFilter = vtk.vtkContourFilter()\r\n contourFilter.SetNumberOfContours(self.nb.value)\r\n contourFilter.SetInputConnection(modeller.GetOutputPort())\r\n contourFilter.ComputeNormalsOn()\r\n contourFilter.ComputeScalarsOn()\r\n 
contourFilter.UseScalarTreeOn()\r\n contourFilter.SetValue(self.contour.value, self.contourValue.value)\r\n contourFilter.SetValue(self.contour2.value, self.contourValue2.value)\r\n contourFilter.SetValue(self.contour3.value, self.contourValue3.value)\r\n contourFilter.SetValue(self.contour4.value, self.contourValue4.value)\r\n contourFilter.SetValue(self.contour5.value, self.contourValue5.value)\r\n\r\n isoSurface = contourFilter.GetOutput()\r\n self.AddContour(isoSurface)", "def _map_elements3(self, nid_map, model, unused_j, unused_dim_max,\n nid_cp_cd, xref_loads=True):\n settings = self.gui.settings # type: Settings\n\n # these normals point inwards\n # 4\n # / | \\\n # / | \\\n # 3-------2\n # \\ | /\n # \\ | /\n # 1\n _ctetra_faces = (\n (0, 1, 2), # (1, 2, 3),\n (0, 3, 1), # (1, 4, 2),\n (0, 3, 2), # (1, 3, 4),\n (1, 3, 2), # (2, 4, 3),\n )\n\n # these normals point inwards\n #\n #\n #\n #\n # /4-----3\n # / /\n # / 5 /\n # / \\ /\n # / \\ /\n # 1---------2\n _cpyram_faces = (\n (0, 1, 2, 3), # (1, 2, 3, 4),\n (1, 4, 2), # (2, 5, 3),\n (2, 4, 3), # (3, 5, 4),\n (0, 3, 4), # (1, 4, 5),\n (0, 4, 1), # (1, 5, 2),\n )\n\n # these normals point inwards\n # /6\n # / | \\\n # / | \\\n # 3\\ | \\\n # | \\ /4-----5\n # | \\/ /\n # | / \\ /\n # | / \\ /\n # | / \\ /\n # 1---------2\n _cpenta_faces = (\n (0, 2, 1), # (1, 3, 2),\n (3, 4, 5), # (4, 5, 6),\n\n (0, 1, 4, 3), # (1, 2, 5, 4), # bottom\n (1, 2, 5, 4), # (2, 3, 6, 5), # right\n (0, 3, 5, 2), # (1, 4, 6, 3), # left\n )\n\n # these normals point inwards\n # 8----7\n # /| /|\n # / | / |\n # / 5-/--6\n # 4-----3 /\n # | / | /\n # | / | /\n # 1-----2\n _chexa_faces = (\n (4, 5, 6, 7), # (5, 6, 7, 8),\n (0, 3, 2, 1), # (1, 4, 3, 2),\n (1, 2, 6, 5), # (2, 3, 7, 6),\n (2, 3, 7, 6), # (3, 4, 8, 7),\n (0, 4, 7, 3), # (1, 5, 8, 4),\n (0, 6, 5, 4), # (1, 7, 6, 5),\n )\n\n elements, nelements, unused_superelements = get_elements_nelements_unvectorized(model)\n xyz_cid0 = self.xyz_cid0\n pids_array = np.zeros(nelements, dtype='int32')\n eids_array = np.zeros(nelements, dtype='int32')\n mcid_array = np.full(nelements, -1, dtype='int32')\n material_theta_array = np.full(nelements, np.nan, dtype='float32')\n dim_array = np.full(nelements, -1, dtype='int32')\n nnodes_array = np.full(nelements, -1, dtype='int32')\n\n # quality\n min_interior_angle = np.zeros(nelements, 'float32')\n max_interior_angle = np.zeros(nelements, 'float32')\n dideal_theta = np.zeros(nelements, 'float32')\n max_skew_angle = np.zeros(nelements, 'float32')\n max_warp_angle = np.zeros(nelements, 'float32')\n max_aspect_ratio = np.zeros(nelements, 'float32')\n area = np.zeros(nelements, 'float32')\n area_ratio = np.zeros(nelements, 'float32')\n taper_ratio = np.zeros(nelements, 'float32')\n min_edge_length = np.zeros(nelements, 'float32')\n normals = np.full((nelements, 3), np.nan, 'float32')\n\n nids_list = []\n ieid = 0\n cell_offset = 0\n\n dtype = get_numpy_idtype_for_vtk()\n\n cell_types_array = np.zeros(nelements, dtype=dtype)\n cell_offsets_array = np.zeros(nelements, dtype=dtype)\n\n cell_type_point = 1 # vtk.vtkVertex().GetCellType()\n cell_type_line = 3 # vtk.vtkLine().GetCellType()\n cell_type_tri3 = 5 # vtkTriangle().GetCellType()\n cell_type_tri6 = 22 # vtkQuadraticTriangle().GetCellType()\n cell_type_quad4 = 9 # vtkQuad().GetCellType()\n #cell_type_quad8 = 23 # vtkQuadraticQuad().GetCellType()\n cell_type_tetra4 = 10 # vtkTetra().GetCellType()\n cell_type_tetra10 = 24 # vtkQuadraticTetra().GetCellType()\n cell_type_pyram5 = 14 # vtkPyramid().GetCellType()\n 
#cell_type_pyram13 = 27 # vtk.vtkQuadraticPyramid().GetCellType()\n cell_type_penta6 = 13 # vtkWedge().GetCellType()\n cell_type_penta15 = 26 # vtkQuadraticWedge().GetCellType()\n cell_type_hexa8 = 12 # vtkHexahedron().GetCellType()\n cell_type_hexa20 = 25 # vtkQuadraticHexahedron().GetCellType()\n\n # per gui/testing_methods.py/create_vtk_cells_of_constant_element_type\n #1 = vtk.vtkVertex().GetCellType()\n #3 = vtkLine().GetCellType()\n #5 = vtkTriangle().GetCellType()\n #9 = vtk.vtkQuad().GetCellType()\n #10 = vtkTetra().GetCellType()\n #vtkPenta().GetCellType()\n #vtkHexa().GetCellType()\n #vtkPyram().GetCellType()\n\n skipped_etypes = set()\n all_nids = nid_cp_cd[:, 0]\n ieid = 0\n for eid, elem in sorted(elements.items()):\n if ieid % 5000 == 0 and ieid > 0:\n print(' map_elements = %i' % ieid)\n etype = elem.type\n nnodes = None\n nids = None\n pid = None\n cell_type = None\n inids = None\n\n dideal_thetai = np.nan\n min_thetai = np.nan\n max_thetai = np.nan\n #max_thetai = np.nan\n max_skew = np.nan\n max_warp = np.nan\n aspect_ratio = np.nan\n areai = np.nan\n area_ratioi = np.nan\n taper_ratioi = np.nan\n min_edge_lengthi = np.nan\n normali = np.nan\n if etype in ['CTRIA3', 'CTRIAR', 'CTRAX3', 'CPLSTN3', 'CPLSTS3']:\n nids = elem.nodes\n pid = elem.pid\n cell_type = cell_type_tri3 # 5\n inids = np.searchsorted(all_nids, nids)\n p1, p2, p3 = xyz_cid0[inids, :]\n out = tri_quality(p1, p2, p3)\n (areai, max_skew, aspect_ratio,\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi) = out\n normali = np.cross(p1 - p2, p1 - p3)\n if isinstance(elem.theta_mcid, float):\n material_theta_array[ieid] = elem.theta_mcid\n else:\n mcid_array[ieid] = elem.theta_mcid\n nnodes = 3\n dim = 2\n\n elif etype in {'CQUAD4', 'CQUADR', 'CPLSTN4', 'CPLSTS4', 'CQUADX4',\n 'CQUAD1'}: # nastran95\n nids = elem.nodes\n pid = elem.pid\n cell_type = cell_type_quad4 #9\n inids = np.searchsorted(all_nids, nids)\n p1, p2, p3, p4 = xyz_cid0[inids, :]\n out = quad_quality(elem, p1, p2, p3, p4)\n (areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, max_warp) = out\n normali = np.cross(p1 - p3, p2 - p4)\n if isinstance(elem.theta_mcid, float):\n material_theta_array[ieid] = elem.theta_mcid\n else:\n mcid_array[ieid] = elem.theta_mcid\n nnodes = 4\n dim = 2\n\n elif etype in ['CTRIA6']:\n nids = elem.nodes\n pid = elem.pid\n if None in nids:\n cell_type = cell_type_tri3\n inids = np.searchsorted(all_nids, nids[:3])\n nids = nids[:3]\n p1, p2, p3 = xyz_cid0[inids, :]\n nnodes = 3\n else:\n cell_type = cell_type_tri6\n inids = np.searchsorted(all_nids, nids)\n p1, p2, p3, p4, unused_p5, unused_p6 = xyz_cid0[inids, :]\n nnodes = 6\n out = tri_quality(p1, p2, p3)\n (areai, max_skew, aspect_ratio,\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi) = out\n normali = np.cross(p1 - p2, p1 - p3)\n if isinstance(elem.theta_mcid, float):\n material_theta_array[ieid] = elem.theta_mcid\n else:\n mcid_array[ieid] = elem.theta_mcid\n dim = 2\n elif etype == 'CQUAD8':\n nids = elem.nodes\n pid = elem.pid\n if None in nids:\n cell_type = cell_type_tri3\n inids = np.searchsorted(all_nids, nids[:4])\n nids = nids[:4]\n p1, p2, p3, p4 = xyz_cid0[inids, :]\n nnodes = 4\n else:\n cell_type = cell_type_tri6\n inids = np.searchsorted(all_nids, nids)\n p1, p2, p3, p4 = xyz_cid0[inids[:4], :]\n nnodes = 8\n out = quad_quality(elem, p1, p2, p3, p4)\n (areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, 
max_warp) = out\n normali = np.cross(p1 - p3, p2 - p4)\n if isinstance(elem.theta_mcid, float):\n material_theta_array[ieid] = elem.theta_mcid\n else:\n mcid_array[ieid] = elem.theta_mcid\n nnodes = 4\n dim = 2\n\n elif etype == 'CSHEAR':\n nids = elem.nodes\n pid = elem.pid\n cell_type = cell_type_quad4 #9\n inids = np.searchsorted(all_nids, nids)\n p1, p2, p3, p4 = xyz_cid0[inids, :]\n out = quad_quality(elem, p1, p2, p3, p4)\n (areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, max_warp) = out\n normali = np.cross(p1 - p3, p2 - p4)\n nnodes = 4\n dim = 2\n\n elif etype == 'CTETRA':\n nids = elem.nodes\n pid = elem.pid\n if None in nids:\n cell_type = cell_type_tetra4\n nids = nids[:4]\n nnodes = 4\n else:\n cell_type = cell_type_tetra10\n nnodes = 10\n inids = np.searchsorted(all_nids, nids)\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(\n _ctetra_faces, nids, nid_map, xyz_cid0)\n dim = 3\n\n elif etype == 'CHEXA':\n nids = elem.nodes\n pid = elem.pid\n if None in nids:\n cell_type = cell_type_hexa8\n nids = nids[:8]\n nnodes = 8\n else:\n cell_type = cell_type_hexa20\n nnodes = 20\n inids = np.searchsorted(all_nids, nids)\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(\n _chexa_faces, nids, nid_map, xyz_cid0)\n dim = 3\n\n elif etype == 'CPENTA':\n nids = elem.nodes\n pid = elem.pid\n\n if None in nids:\n cell_type = cell_type_penta6\n nids = nids[:6]\n nnodes = 6\n else:\n cell_type = cell_type_penta15\n nnodes = 15\n\n inids = np.searchsorted(all_nids, nids)\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(\n _cpenta_faces, nids, nid_map, xyz_cid0)\n dim = 3\n elif etype == 'CPYRAM':\n # TODO: assuming 5\n nids = elem.nodes\n pid = elem.pid\n if None in nids:\n cell_type = cell_type_pyram5\n nids = nids[:5]\n nnodes = 5\n else:\n cell_type = cell_type_penta15\n nnodes = 15\n inids = np.searchsorted(all_nids, nids)\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(\n _cpyram_faces, nids, nid_map, xyz_cid0)\n dim = 3\n elif etype in ['CELAS2', 'CELAS4', 'CDAMP4']:\n # these can have empty nodes and have no property\n # CELAS1: 1/2 GRID/SPOINT and pid\n # CELAS2: 1/2 GRID/SPOINT, k, ge, and s\n # CELAS3: 1/2 SPOINT and pid\n # CELAS4: 1/2 SPOINT and k\n nids = elem.nodes\n assert nids[0] != nids[1]\n if None in nids:\n assert nids[0] is not None, nids\n assert nids[1] is None, nids\n nids = [nids[0]]\n cell_type = cell_type_point\n nnodes = 1\n else:\n nids = elem.nodes\n assert nids[0] != nids[1]\n cell_type = cell_type_line\n nnodes = 2\n inids = np.searchsorted(all_nids, nids)\n pid = 0\n dim = 0\n elif etype in ['CBUSH', 'CBUSH1D', 'CBUSH2D',\n 'CELAS1', 'CELAS3',\n 'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP5',\n 'CFAST', 'CGAP', 'CVISC']:\n nids = elem.nodes\n assert nids[0] != nids[1]\n assert None not in nids, 'nids=%s\\n%s' % (nids, elem)\n pid = elem.pid\n cell_type = cell_type_line\n inids = np.searchsorted(all_nids, nids)\n nnodes = 2\n dim = 0\n elif etype in ['CBAR', 'CBEAM']:\n nids = elem.nodes\n pid = elem.pid\n pid_ref = model.Property(pid)\n areai = pid_ref.Area()\n cell_type = cell_type_line\n inids = np.searchsorted(all_nids, nids)\n p1, p2 = xyz_cid0[inids, :]\n min_edge_lengthi = norm(p2 - p1)\n nnodes = 2\n dim = 1\n elif etype in ['CROD', 'CTUBE']:\n nids = elem.nodes\n pid = elem.pid\n pid_ref = model.Property(pid)\n areai = pid_ref.Area()\n cell_type = cell_type_line\n inids = 
np.searchsorted(all_nids, nids)\n p1, p2 = xyz_cid0[inids, :]\n min_edge_lengthi = norm(p2 - p1)\n nnodes = 2\n dim = 1\n elif etype == 'CONROD':\n nids = elem.nodes\n areai = elem.Area()\n pid = 0\n cell_type = cell_type_line\n inids = np.searchsorted(all_nids, nids)\n p1, p2 = xyz_cid0[inids, :]\n min_edge_lengthi = norm(p2 - p1)\n nnodes = 2\n dim = 1\n #------------------------------\n # rare\n #elif etype == 'CIHEX1':\n #nids = elem.nodes\n #pid = elem.pid\n #cell_type = cell_type_hexa8\n #inids = np.searchsorted(all_nids, nids)\n #min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(\n #_chexa_faces, nids, nid_map, xyz_cid0)\n #nnodes = 8\n #dim = 3\n elif etype == 'CHBDYE':\n #self.eid_map[eid] = ieid\n eid_solid = elem.eid2\n side = elem.side\n element_solid = model.elements[eid_solid]\n\n mapped_inids = SIDE_MAP[element_solid.type][side]\n side_inids = [nid - 1 for nid in mapped_inids]\n nodes = element_solid.node_ids\n\n pid = 0\n nnodes = len(side_inids)\n nids = [nodes[inid] for inid in side_inids]\n inids = np.searchsorted(all_nids, nids)\n\n if len(side_inids) == 4:\n cell_type = cell_type_quad4\n else:\n msg = 'element_solid:\\n%s' % (str(element_solid))\n msg += 'mapped_inids = %s\\n' % mapped_inids\n msg += 'side_inids = %s\\n' % side_inids\n msg += 'nodes = %s\\n' % nodes\n #msg += 'side_nodes = %s\\n' % side_nodes\n raise NotImplementedError(msg)\n elif etype == 'GENEL':\n nids = []\n if len(elem.ul_nodes):\n nids.append(elem.ul_nodes)\n if len(elem.ud_nodes):\n nids.append(elem.ud_nodes)\n nids = np.unique(np.hstack(nids))\n #print(elem.get_stats())\n nids = nids[:2]\n\n areai = np.nan\n pid = 0\n cell_type = cell_type_line\n inids = np.searchsorted(all_nids, nids)\n p1, p2 = xyz_cid0[inids, :]\n min_edge_lengthi = norm(p2 - p1)\n nnodes = len(nids)\n dim = 1\n else:\n #raise NotImplementedError(elem)\n skipped_etypes.add(etype)\n nelements -= 1\n continue\n #for nid in nids:\n #assert isinstance(nid, integer_types), 'not an integer. nids=%s\\n%s' % (nids, elem)\n #assert nid != 0, 'not a positive integer. 
nids=%s\\n%s' % (nids, elem)\n\n assert inids is not None\n if not np.array_equal(all_nids[inids], nids):\n msg = 'all_nids[inids]=%s nids=%s\\n%s' % (all_nids[inids], nids, elem)\n raise RuntimeError(msg)\n\n assert cell_type is not None\n assert cell_offset is not None\n assert eid is not None\n assert pid is not None\n assert dim is not None\n assert nnodes is not None\n nids_list.append(nnodes)\n nids_list.extend(inids)\n normals[ieid] = normali\n eids_array[ieid] = eid\n pids_array[ieid] = pid\n dim_array[ieid] = dim\n cell_types_array[ieid] = cell_type\n cell_offsets_array[ieid] = cell_offset # I assume the problem is here\n cell_offset += nnodes + 1\n self.eid_map[eid] = ieid\n\n min_interior_angle[ieid] = min_thetai\n max_interior_angle[ieid] = max_thetai\n dideal_theta[ieid] = dideal_thetai\n max_skew_angle[ieid] = max_skew\n max_warp_angle[ieid] = max_warp\n max_aspect_ratio[ieid] = aspect_ratio\n area[ieid] = areai\n area_ratio[ieid] = area_ratioi\n taper_ratio[ieid] = taper_ratioi\n min_edge_length[ieid] = min_edge_lengthi\n ieid += 1\n\n #print('self.eid_map =', self.eid_map)\n\n icells_zero = np.where(cell_types_array == 0)[0]\n # TODO: I'd like to get rid of deep=1, but it'll crash the edges\n deep = 1\n if len(icells_zero):\n icells = np.where(cell_types_array != 0)[0]\n if len(icells) == 0:\n self.log.error('skipped_etypes = %s' % skipped_etypes)\n raise RuntimeError('there are no elements...')\n eids_array = eids_array[icells]\n pids_array = pids_array[icells]\n #dim_array = pids_array[dim_array]\n cell_types_array = cell_types_array[icells]\n cell_offsets_array = cell_offsets_array[icells]\n nnodes_array = nnodes_array[icells]\n normals = normals[icells, :]\n #deep = 1\n #print('deep = %s' % deep)\n if skipped_etypes:\n self.log.error('skipped_etypes = %s' % list(skipped_etypes))\n #print('skipped_etypes = %s' % skipped_etypes)\n if len(pids_array) != nelements:\n msg = 'nelements=%s len(pids_array)=%s' % (nelements, len(pids_array))\n raise RuntimeError(msg)\n if len(cell_offsets_array) != nelements:\n msg = 'nelements=%s len(cell_offsets_array)=%s' % (nelements, len(cell_offsets_array))\n raise RuntimeError(msg)\n\n nids_array = np.array(nids_list, dtype=dtype)\n\n #-----------------------------------------------------------------\n # saving some data members\n self.element_ids = eids_array\n\n #print('cell_types_array* = ', cell_types_array.tolist())\n #print('cell_offsets_array* = ', cell_offsets_array.tolist())\n\n #-----------------------------------------------------------------\n # build the grid\n\n #self.log.info('nids_array = %s' % nids_array)\n #self.log.info('cell_offsets_array = %s' % cell_offsets_array)\n #self.log.info('cell_types_array = %s' % cell_types_array)\n\n # Create the array of cells\n cells_id_type = numpy_to_vtkIdTypeArray(nids_array, deep=1)\n vtk_cells = vtk.vtkCellArray()\n vtk_cells.SetCells(nelements, cells_id_type)\n\n # Cell types\n vtk_cell_types = numpy_to_vtk(\n cell_types_array, deep=deep,\n array_type=vtk.vtkUnsignedCharArray().GetDataType())\n\n vtk_cell_offsets = numpy_to_vtk(cell_offsets_array, deep=deep,\n array_type=vtk.VTK_ID_TYPE)\n\n grid = self.grid\n #grid = vtk.vtkUnstructuredGrid()\n grid.SetCells(vtk_cell_types, vtk_cell_offsets, vtk_cells)\n\n #-----------------------------------------------------------------\n # fill the results\n nid_to_pid_map = None\n self.isubcase_name_map = {1: ['Nastran', '']}\n icase = 0\n cases = OrderedDict()\n form = ['Geometry', None, []]\n form0 = form[2]\n\n subcase_id = 0\n\n #nids_set 
= True\n #if nids_set:\n # this intentionally makes a deepcopy\n #nids = np.array(nid_cp_cd[:, 0])\n\n # this intentionally makes a deepcopy\n cds = np.array(nid_cp_cd[:, 2])\n colormap = settings.colormap\n nid_res = GuiResult(subcase_id, 'NodeID', 'NodeID', 'node', all_nids,\n mask_value=0,\n nlabels=None,\n labelsize=None,\n ncolors=None,\n colormap=colormap,\n data_format=None,\n uname='GuiResult')\n cases[icase] = (nid_res, (0, 'Node ID'))\n form0.append(('Node ID', icase, []))\n icase += 1\n\n if cds.max() > 0:\n cd_res = GuiResult(0, header='NodeCd', title='NodeCd',\n location='node', scalar=cds)\n cases[icase] = (cd_res, (0, 'NodeCd'))\n form0.append(('NodeCd', icase, []))\n icase += 1\n\n eid_res = GuiResult(subcase_id, 'ElementID', 'ElementID', 'centroid', eids_array,\n mask_value=0,\n nlabels=None,\n labelsize=None,\n ncolors=None,\n colormap=colormap,\n data_format=None,\n uname='GuiResult')\n cases[icase] = (eid_res, (0, 'ElementID'))\n form0.append(('ElementID', icase, []))\n icase += 1\n\n is_element_dim = True\n #if len(np.unique(dim_array)) > 1:\n #dim_res = GuiResult(subcase_id, 'ElementDim', 'ElementDim', 'centroid', dim_array,\n #mask_value=-1,\n #nlabels=None,\n #labelsize=None,\n #ncolors=None,\n #colormap=colormap,\n #data_format=None,\n #uname='GuiResult')\n #cases[icase] = (dim_res, (0, 'ElementDim'))\n #form0.append(('ElementDim', icase, []))\n #icase += 1\n\n if nnodes_array.max() > -1:\n nnodes_res = GuiResult(subcase_id, 'NNodes/Elem', 'NNodes/Elem',\n 'centroid', nnodes_array,\n mask_value=0,\n nlabels=None,\n labelsize=None,\n ncolors=None,\n colormap=colormap,\n data_format=None,\n uname='GuiResult')\n cases[icase] = (nnodes_res, (0, 'NNodes/Elem'))\n form0.append(('NNodes/Elem', icase, []))\n icase += 1\n\n #pid_res = GuiResult(subcase_id, 'PropertyID', 'PropertyID', 'centroid', pids_array,\n #mask_value=0,\n #nlabels=None,\n #labelsize=None,\n #ncolors=None,\n #colormap=colormap,\n #data_format=None,\n #uname='GuiResult')\n #cases[icase] = (pid_res, (0, 'PropertyID'))\n #form0.append(('PropertyID', icase, []))\n #icase += 1\n\n if len(model.properties) and nelements and settings.nastran_is_properties:\n icase, upids, pcomp, pshell, is_pshell_pcomp = self._build_properties(\n model, nelements, eids_array, pids_array, cases, form0, icase)\n icase = _build_materials(model, pcomp, pshell, is_pshell_pcomp,\n cases, form0, icase)\n try:\n icase = _build_optimization(model, pids_array, upids,\n nelements, cases, form0, icase)\n except Exception:\n #raise\n s = StringIO()\n traceback.print_exc(file=s)\n sout = s.getvalue()\n self.gui.log_error(sout)\n print(sout)\n\n #if isgreater_int(mcid_array, -1):\n #mcid_res = GuiResult(subcase_id, 'Material Coordinate System', 'MaterialCoord',\n #'centroid', mcid_array,\n #mask_value=-1,\n #nlabels=None,\n #labelsize=None,\n #ncolors=None,\n #colormap=colormap,\n #data_format=None,\n #uname='GuiResult')\n #cases[icase] = (mcid_res, (0, 'Material Coordinate System'))\n #form0.append(('Material Coordinate System', icase, []))\n #icase += 1\n\n #if np.isfinite(theta_array).any():\n #print('np.nanmax(theta_array) =', np.nanmax(theta_array))\n #theta_res = GuiResult(subcase_id, 'Theta', 'Theta', 'centroid', theta_array,\n #mask_value=None,\n #nlabels=None,\n #labelsize=None,\n #ncolors=None,\n #colormap=colormap,\n #data_format=None,\n #uname='GuiResult')\n #cases[icase] = (theta_res, (0, 'Theta'))\n #form0.append(('Theta', icase, []))\n #icase += 1\n\n normal_mag = underflow_norm(normals, axis=1)\n assert len(normal_mag) == 
nelements\n normals /= normal_mag.reshape(nelements, 1)\n i_not_nan = np.isnan(normal_mag)\n\n #if self.make_offset_normals_dim and nelements:\n #material_coord = None\n #icase, normals = _build_normals_quality(\n #model, self.gui.eid_map, nelements, cases, form0, icase,\n #xyz_cid0, material_coord, material_theta,\n #min_interior_angle, max_interior_angle, dideal_theta,\n #area, max_skew_angle, taper_ratio,\n #max_warp_angle, area_ratio, min_edge_length, max_aspect_ratio,\n #make_offset_normals_dim=self.make_offset_normals_dim)\n #self.normals = normals\n\n #----------------------------------------------------------\n\n is_shell = False\n if False in i_not_nan:\n #max_normal = np.nanmax(normal_mag[i_not_nan])\n #is_shell = np.abs(max_normal) > 0.\n is_shell = True\n is_solid = isfinite_and_nonzero(max_interior_angle)\n #print('is_shell=%s is_solid=%s' % (is_shell, is_solid))\n if is_shell:\n nx_res = GuiResult(\n 0, header='NormalX', title='NormalX',\n location='centroid', scalar=normals[:, 0], data_format='%.2f')\n ny_res = GuiResult(\n 0, header='NormalY', title='NormalY',\n location='centroid', scalar=normals[:, 1], data_format='%.2f')\n nz_res = GuiResult(\n 0, header='NormalZ', title='NormalZ',\n location='centroid', scalar=normals[:, 2], data_format='%.2f')\n nxyz_res = NormalResult(0, 'Normals', 'Normals',\n nlabels=2, labelsize=5, ncolors=2,\n colormap=colormap, data_format='%.1f',\n uname='NormalResult')\n\n\n area_res = GuiResult(0, header='Area', title='Area',\n location='centroid', scalar=area)\n min_edge_length_res = GuiResult(\n 0, header='Min Edge Length', title='Min Edge Length',\n location='centroid', scalar=min_edge_length)\n\n min_theta_res = GuiResult(\n 0, header='Min Interior Angle', title='Min Interior Angle',\n location='centroid', scalar=np.degrees(min_interior_angle))\n max_theta_res = GuiResult(\n 0, header='Max Interior Angle', title='Max Interior Angle',\n location='centroid', scalar=np.degrees(max_interior_angle))\n dideal_theta_res = GuiResult(\n 0, header='Delta Ideal Angle', title='Delta Ideal Angle',\n location='centroid', scalar=np.degrees(dideal_theta))\n\n skew = np.degrees(max_skew_angle)\n skew_res = GuiResult(\n 0, header='Max Skew Angle', title='MaxSkewAngle',\n location='centroid', scalar=skew)\n aspect_res = GuiResult(\n 0, header='Aspect Ratio', title='AspectRatio',\n location='centroid', scalar=max_aspect_ratio)\n\n form_checks = []\n form0.append(('Element Checks', None, form_checks))\n if is_element_dim:\n form_checks.append(('ElementDim', icase, []))\n\n if self.make_offset_normals_dim and self.make_nnodes_result and 0: # pragma: no cover\n nnodes_res = GuiResult(\n 0, header='NNodes/Elem', title='NNodes/Elem',\n location='centroid', scalar=nnodes_array)\n form_checks.append(('NNodes', icase + 1, []))\n cases[icase + 1] = (nnodes_res, (0, 'NNodes'))\n icase += 1\n\n if self.make_offset_normals_dim or 1:\n cases[icase + 1] = (nx_res, (0, 'NormalX'))\n cases[icase + 2] = (ny_res, (0, 'NormalY'))\n cases[icase + 3] = (nz_res, (0, 'NormalZ'))\n cases[icase + 4] = (nxyz_res, (0, 'Normal'))\n\n form_checks.append(('NormalX', icase + 1, []))\n form_checks.append(('NormalY', icase + 2, []))\n form_checks.append(('NormalZ', icase + 3, []))\n form_checks.append(('Normal', icase + 4, []))\n\n cases[icase + 5] = (area_res, (0, 'Area'))\n cases[icase + 6] = (min_edge_length_res, (0, 'Min Edge Length'))\n cases[icase + 7] = (min_theta_res, (0, 'Min Interior Angle'))\n cases[icase + 8] = (max_theta_res, (0, 'Max Interior Angle'))\n cases[icase + 9] = 
(dideal_theta_res, (0, 'Delta Ideal Angle'))\n cases[icase + 10] = (skew_res, (0, 'Max Skew Angle'))\n cases[icase + 11] = (aspect_res, (0, 'Aspect Ratio'))\n\n form_checks.append(('Area', icase + 5, []))\n form_checks.append(('Min Edge Length', icase + 6, []))\n form_checks.append(('Min Interior Angle', icase + 7, []))\n form_checks.append(('Max Interior Angle', icase + 8, []))\n form_checks.append(('Delta Ideal Angle', icase + 9, []))\n form_checks.append(('Max Skew Angle', icase + 10, []))\n form_checks.append(('Aspect Ratio', icase + 11, []))\n icase += 12\n\n if np.any(np.isfinite(area_ratio)) and np.nanmax(area_ratio) > 1.:\n arearatio_res = GuiResult(\n 0, header='Area Ratio', title='Area Ratio',\n location='centroid', scalar=area_ratio)\n cases[icase] = (arearatio_res, (0, 'Area Ratio'))\n form_checks.append(('Area Ratio', icase, []))\n icase += 1\n\n if np.any(np.isfinite(taper_ratio)) and np.nanmax(taper_ratio) > 1.:\n taperratio_res = GuiResult(\n 0, header='Taper Ratio', title='Taper Ratio',\n location='centroid', scalar=taper_ratio)\n cases[icase] = (taperratio_res, (0, 'Taper Ratio'))\n form_checks.append(('Taper Ratio', icase, []))\n icase += 1\n\n if isfinite_and_nonzero(max_warp_angle):\n warp_res = GuiResult(\n 0, header='Max Warp Angle', title='MaxWarpAngle',\n location='centroid', scalar=np.degrees(max_warp_angle))\n cases[icase + 4] = (warp_res, (0, 'Max Warp Angle'))\n form_checks.append(('Max Warp Angle', icase, []))\n icase += 1\n\n #if (np.abs(xoffset).max() > 0.0 or np.abs(yoffset).max() > 0.0 or\n #np.abs(zoffset).max() > 0.0):\n # offsets\n #offset_res = GuiResult(\n #0, header='Offset', title='Offset',\n #location='centroid', scalar=offset, data_format='%g')\n #offset_x_res = GuiResult(\n #0, header='OffsetX', title='OffsetX',\n #location='centroid', scalar=xoffset, data_format='%g')\n #offset_y_res = GuiResult(\n #0, header='OffsetY', title='OffsetY',\n #location='centroid', scalar=yoffset, data_format='%g')\n #offset_z_res = GuiResult(\n #0, header='OffsetZ', title='OffsetZ',\n #location='centroid', scalar=zoffset, data_format='%g')\n\n #cases[icase] = (offset_res, (0, 'Offset'))\n #cases[icase + 1] = (offset_x_res, (0, 'OffsetX'))\n #cases[icase + 2] = (offset_y_res, (0, 'OffsetY'))\n #cases[icase + 3] = (offset_z_res, (0, 'OffsetZ'))\n\n #form_checks.append(('Offset', icase, []))\n #form_checks.append(('OffsetX', icase + 1, []))\n #form_checks.append(('OffsetY', icase + 2, []))\n #form_checks.append(('OffsetZ', icase + 3, []))\n #icase += 4\n\n if self.make_xyz or IS_TESTING:\n x_res = GuiResult(\n 0, header='X', title='X',\n location='node', scalar=xyz_cid0[:, 0], data_format='%g')\n y_res = GuiResult(\n 0, header='Y', title='Y',\n location='node', scalar=xyz_cid0[:, 1], data_format='%g')\n z_res = GuiResult(\n 0, header='Z', title='Z',\n location='node', scalar=xyz_cid0[:, 2], data_format='%g')\n cases[icase] = (x_res, (0, 'X'))\n cases[icase + 1] = (y_res, (0, 'Y'))\n cases[icase + 2] = (z_res, (0, 'Z'))\n form_checks.append(('X', icase + 0, []))\n form_checks.append(('Y', icase + 1, []))\n form_checks.append(('Z', icase + 2, []))\n icase += 3\n\n elif is_solid:\n # only solid elements\n form_checks = []\n form0.append(('Element Checks', None, form_checks))\n\n min_edge_length_res = GuiResult(\n 0, header='Min Edge Length', title='Min Edge Length',\n location='centroid', scalar=min_edge_length)\n min_theta_res = GuiResult(\n 0, header='Min Interior Angle', title='Min Interior Angle',\n location='centroid', scalar=np.degrees(min_interior_angle))\n 
max_theta_res = GuiResult(\n 0, header='Max Interior Angle', title='Max Interior Angle',\n location='centroid', scalar=np.degrees(max_interior_angle))\n skew = 90. - np.degrees(max_skew_angle)\n #skew_res = GuiResult(0, header='Max Skew Angle', title='MaxSkewAngle',\n #location='centroid', scalar=skew)\n if is_element_dim:\n form_checks.append(('ElementDim', icase, []))\n form_checks.append(('Min Edge Length', icase + 1, []))\n form_checks.append(('Min Interior Angle', icase + 2, []))\n form_checks.append(('Max Interior Angle', icase + 3, []))\n form_checks.append(('Max Skew Angle', icase + 4, []))\n cases[icase + 1] = (min_edge_length_res, (0, 'Min Edge Length'))\n cases[icase + 2] = (min_theta_res, (0, 'Min Interior Angle'))\n cases[icase + 3] = (max_theta_res, (0, 'Max Interior Angle'))\n #cases[icase + 4] = (skew_res, (0, 'Max Skew Angle'))\n icase += 4\n\n else:\n form0.append(('ElementDim', icase, []))\n icase += 1\n\n if isgreater_int(mcid_array, -1):\n material_coord_res = GuiResult(\n 0, header='MaterialCoord', title='MaterialCoord',\n location='centroid',\n scalar=mcid_array, mask_value=-1, data_format='%i')\n cases[icase] = (material_coord_res, (0, 'MaterialCoord'))\n form0.append(('MaterialCoord', icase, []))\n icase += 1\n if isfinite(material_theta_array):\n material_theta_res = GuiResult(\n 0, header='MaterialTheta', title='MaterialTheta',\n location='centroid',\n scalar=material_theta_array, data_format='%.3f')\n cases[icase] = (material_theta_res, (0, 'MaterialTheta'))\n form0.append(('MaterialTheta', icase, []))\n icase += 1\n\n #print(normals)\n #----------------------------------------------------------\n # finishing up vtk\n if nelements and isfinite(min_edge_length):\n mean_edge_length = np.nanmean(min_edge_length)\n self.set_glyph_scale_factor(mean_edge_length * 2.5) # was 1.5\n\n grid.Modified()\n #----------------------------------------------------------\n # finishing up parameters\n self.node_ids = all_nids\n self.normals = normals\n\n return nid_to_pid_map, icase, cases, form", "def cloudy_grid_surface(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n cloudy_library = clo.library()\n model_number_matrix,grid_table = cloudy_library._restore_grid_table(grid_ext=p.grid_ext)\n\n fig = plt.figure(figsize=(10,7))\n ax = plt.axes(projection='3d')\n\n key1, key2 = list(p.cloudy_param.keys())[0],list(p.cloudy_param.keys())[1]\n value1, value2 = list(p.cloudy_param.values())[0],list(p.cloudy_param.values())[1]\n\n # Decide on what goes on x and y axis\n cloudy_parameters = np.array(['NH','FUV','hden','Z'])\n x_index = cloudy_parameters[(cloudy_parameters != key1) &\\\n (cloudy_parameters != key2)][0]\n y_index = cloudy_parameters[(cloudy_parameters != key1) &\\\n (cloudy_parameters != key2)][1]\n\n # Cut in grid table\n grid_table_cut = grid_table.iloc[np.where((grid_table[key1].values == value1) & \\\n (grid_table[key2].values == value2))[0]]\n x, y = grid_table_cut[x_index].values, grid_table_cut[y_index].values\n X, Y = np.meshgrid(np.unique(grid_table_cut[x_index].values), np.unique(grid_table_cut[y_index].values))\n\n # Plot line ratio?\n if '_' in p.line:\n L1 = grid_table_cut[p.line.split('_')[0]].values\n L2 = grid_table_cut[p.line.split('_')[1]].values\n L2[L2 == 0] = 1e9\n line_lum = (L1/L2).astype(float)\n vmin = np.min(np.log10(line_lum[L2 < 1e9]))\n\n else:\n line_lum = grid_table_cut[p.line].values.astype(float)\n vmin = np.min(np.log10(line_lum[line_lum > 0]))\n\n lum = np.log10(line_lum)\n lum = 
lum.reshape([len(np.unique(x)), len(np.unique(y))]).T\n\n # ########## Patching the grid !!\n # line_lum[np.isnan(line_lum)] = -1 # what are these?\n # # 0 values: not sure if we have any?\n # line_lum[line_lum == 0] = np.min(line_lum[line_lum > 0])\n # # Negative numbers: missing grid point\n # i_missing = np.where(line_lum < 0)[0]\n # while len(i_missing) > 0:\n # lum = np.log10(line_lum)\n # for i in i_missing:\n # # print(lum[i-1],lum[i+1])\n # try: \n # lum[i] = (lum[i-1] + lum[i+1])/ 2\n # except:\n # pass\n # # print('he',np.isnan(lum[i]))\n # if np.isnan(lum[i]):\n # try:\n # lum[i] = lum[i-1] \n # except:\n # pass\n # if np.isnan(lum[i]):\n # try:\n # lum[i] = lum[i+1] \n # except:\n # pass \n # line_lum[i] = 10.**lum[i]\n # # print(i,lum[i])\n # i_missing = np.where(line_lum < 0)[0]\n # ########## End of patching\n\n\n # pdb.set_trace()\n ax.plot_surface(X, Y, lum, cmap=\"autumn_r\", vmin=vmin, lw=0, rstride=1, cstride=1,alpha=0.8)\n\n ax.set_xlabel('\\n\\n' + getlabel('l'+x_index))\n ax.set_ylabel('\\n\\n' + getlabel('l'+y_index))\n\n try:\n ax.set_zlabel('\\n\\n' + getlabel('l%s' % p.line))\n except:\n ax.set_zlabel('\\n\\n log ' + p.line.replace('_','/'))\n\n\n ax.scatter(x[line_lum > 10.**vmin],y[line_lum > 10.**vmin],np.log10(line_lum[line_lum > 10.**vmin]),\\\n 'o',c=np.log10(line_lum[line_lum > 10.**vmin]),cmap='autumn_r',s=50)\n\n # print(x)\n # print(line_lum)\n ax.view_init(30, p.angle)\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'look-up/'): os.mkdir(p.d_plot + 'look-up/') \n plt.savefig(p.d_plot + 'look-up/cloudy_grid_%s.%s' % (p.line, p.format), format=p.format, dpi=300) \n # pdb.set_trace()", "def vsn_func_1(ns, traj, vs_def_beads_ids):\n for ts in ns.aa2cg_universe.trajectory:\n traj[ts.frame] = ns.aa2cg_universe.atoms[vs_def_beads_ids].center_of_geometry(pbc=None)", "def chooseObject(self, x, y, z):\r\n\r\n \"\"\"deletes the editPoints list. This must be reset every time a new object\r\n is clicked on so that the prevous editpoints from another segmentation do not\r\n interfere with the current object's segmentation\"\"\"\r\n \r\n del self.editPoints[:]\r\n \r\n self.dispedge = to_rgb(self.img[self.z_stack])\r\n \r\n #print \"SENDER LABEL: \" + str(sender.text()\r\n self.center= np.array((z*self.zinterp, x, y))\r\n\r\n xpix=self.img.shape[2]-1\r\n ypix= self.img.shape[1]-1\r\n zpix= self.img.shape[0]*self.zinterp-1\r\n \r\n\r\n #currently padding all sides by 50 for now.\r\n\r\n self.radius= self.curRadius\r\n self.count+=1\r\n \r\n self.padList= np.array([xpix-x, ypix-y, zpix-z*self.zinterp, x, y,z*self.zinterp])\r\n self.padList= self.radius-self.padList\r\n self.padList=(self.padList>0)*self.padList\r\n\r\n \"\"\"perform graphcut and display on interface\"\"\"\r\n self.temp, self.edge= graphCut(interp_img(self.img, self.zinterp), self.center, self.radius,self.temp, self.edge, self.count, self.editPoints, self.padList, self.theta_div, self.phi_div)\r\n self.shrink= self.edge[0:interp_img(self.img, self.zinterp).shape[0]:self.zinterp]!=0\r\n \r\n \"\"\"display the object on the xy plane\"\"\"\r\n self.pixmap2=self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n \r\n \"\"\"display the object on the xz plane\"\"\"\r\n self.pixmap4=self.writeEdge(\"xz\")\r\n self.lbl4.setPixmap(self.pixmap4)\r\n \r\n \"\"\"display the object on the yz plane\"\"\"\r\n self.pixmap6=self.writeEdge(\"yz\")\r\n self.lbl6.setPixmap(self.pixmap6)" ]
[ "0.7424394", "0.6814888", "0.5967758", "0.5692729", "0.5601582", "0.5594345", "0.5561491", "0.5481942", "0.54616654", "0.54466075", "0.5438576", "0.54213005", "0.5418105", "0.5338465", "0.5325525", "0.5324954", "0.5324351", "0.53149396", "0.5309649", "0.5297013", "0.52767515", "0.5262481", "0.52606934", "0.5246041", "0.524106", "0.5227649", "0.52269155", "0.5222478", "0.52158046", "0.5210348" ]
0.7474481
0
Given the object surface dictionary and neighbors dictionary, unfold the rest of the surfaces with a selected surface at the center
def unfold_object(current_surface,surface_dict,neighbors_dict): # Initialize a dictionary for the unfolded surfaces unfolded_surfaces = dict() # Generate a list of surface numbers surface_list = list(surface_dict.keys()) # Add center surface to dictionary without modification unfolded_surfaces[current_surface] = surface_dict[current_surface] # Remove center surface from surface numbers list surface_list.remove(current_surface) # Generate an open list that contains the neighboring surface numbers to the center surface open_list = [(neighbor,[current_surface]) for neighbor in neighbors_dict[current_surface]] # Run until all surfaces are added to the unfolding while len(surface_list) > 0: # Next item in open list item = open_list[0] # Initialize a list of parents parents = list() # If neighbor is not already added if item[0] in surface_list: # Get folded version of the surface folded_surf = surface_dict[item[0]] child = item[0] # while there are still other parents of neighboring surface # (when more than one unfolding is required for a neighbor of the neighbor) while len(item[1])>0: # get parent surface no parent = item[1][-1] # unfold surface on the parent folded_surf = unfold_surface(surface_dict,neighbors_dict,item[1][-1],folded_surf,neighbors_dict[parent][child]) # parent becomes the child child = item[1][-1] # remove parent and add to the parents list parents.append(item[1].pop()) unfolded_surf = folded_surf else: open_list.pop(0) continue unfolded_surfaces[item[0]] = unfolded_surf surface_list.remove(item[0]) parents.reverse() # for neighbors of the current neighbor for neighbor in neighbors_dict[item[0]]: # if not already added if neighbor in surface_list: # add to the open list with correct parenting order open_list.append((neighbor,parents+[item[0]])) else: continue open_list.pop(0) return unfolded_surfaces
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unfold_surface(surface_dict,neighbors_dict,surf_idx,other,neighbor,show=False):\n\n # Visualization\n p = Renderer()\n p.add((surface_dict[surf_idx][0],'r',1))\n\n # Normal of the center surface\n current_normal = surface_dict[surf_idx][1]\n # Normal of the neighboring surface\n candidate_normal = other[1]\n\n # Angle between surfaces\n angle = candidate_normal.angle(current_normal)\n\n # Rotation calculations (Finding transformation matrix)\n A,B,C = neighbor[1]\n L = np.sqrt(A**2 + B**2 + C**2)\n V = np.sqrt(B**2 + C**2)\n D = np.array([[1,0,0,-neighbor[2][0]],[0,1,0,-neighbor[2][1]],[0,0,1,-neighbor[2][2]],[0,0,0,1]])\n if V == 0:\n R_x = np.eye(4)\n else:\n R_x = np.array([[1,0,0,0],[0,C/V,-B/V,0],[0,B/V,C/V,0],[0,0,0,1]])\n R_y = np.array([[V/L,0,-A/L,0],[0,1,0,0],[A/L,0,V/L,0],[0,0,0,1]])\n R_z = np.array([[np.cos(angle),-np.sin(angle),0,0],\n [np.sin(angle),np.cos(angle),0,0],\n [0,0,1,0],[0,0,0,1]])\n T = np.linalg.inv(D)@np.linalg.inv(R_x)@np.linalg.inv(R_y)@R_z@R_y@R_x@D\n\n # Applying transformation\n P_init = np.empty((4,0))\n for point in other[0].points:\n point_vec = np.array([point.x,point.y,point.z,1]).reshape(4,1)\n P_init = np.concatenate([P_init,point_vec],axis=1)\n normal_vec = np.array([other[0].points[-1].x+candidate_normal[0],other[0].points[-1].y+candidate_normal[1],other[0].points[-1].z+candidate_normal[2],1]).reshape(4,1)\n P_init = np.concatenate([P_init,normal_vec],axis=1)\n P_final = T@P_init\n new_points = list()\n for i in range(P_final.shape[1]-1):\n new_points.append(gPoint(np.round(P_final[:3,i],decimals=3)))\n \n # New normal vector\n nnormal = Vector(P_final[:3,-1]-P_final[:3,-2])\n\n # New surface definition as convex polygon\n nsurf = ConvexPolygon((new_points))\n\n # Transform goal region as well\n if len(other)>2:\n G_init = np.empty((4,0))\n for point in other[2].points:\n point_vec = np.array([point.x,point.y,point.z,1]).reshape(4,1)\n G_init = np.concatenate([G_init,point_vec],axis=1)\n G_final = T@G_init\n new_goal = list()\n for i in range(G_final.shape[1]):\n new_goal.append(gPoint(np.round(G_final[:3,i],decimals=3)))\n ngoal = ConvexPolygon((new_goal))\n \n p.add((nsurf,'k',1))\n p.add((ngoal,'k',1))\n if show:\n p.add((other[0],'k',1))\n p.show()\n return (nsurf,nnormal,ngoal)\n\n else:\n p.add((nsurf,'k',1))\n if show:\n p.add((other[0],'k',1))\n p.show()\n return (nsurf,nnormal)", "def get_neighbors(object_):\n\n # Initialize neighbors dictionary\n neighbors = dict()\n\n # For each surface in object dictionary\n for surf in object_:\n\n # Selected surface\n current_surface = object_[surf][0]\n\n # Surface normal\n current_normal = object_[surf][1]\n\n # Rest of the surfaces\n dummy_surfaces = object_.copy()\n dummy_surfaces.pop(surf)\n\n # Initialize nested dictionary for selected surface\n neighbors[surf] = dict()\n\n # For each candidate surface (from rest of the surfaces)\n for n in dummy_surfaces:\n # Candidate surface\n candidate_surface = dummy_surfaces[n][0]\n # Candidate normal\n candidate_normal = dummy_surfaces[n][1]\n # Check if there is an intersection - there should be a line intersection if neighboring surfaces\n check = intersection(current_surface,candidate_surface)\n if check is not None:\n # Intersection vector\n rotation_axis = candidate_normal.cross(current_normal)\n # Angle between surfaces\n angle = candidate_normal.angle(current_normal)\n # Corner position\n axis_position = gPoint((check[0].x+check[1].x)/2,(check[0].y+check[1].y)/2,(check[0].z+check[1].z)/2)\n neighbors[surf][n] = 
((candidate_surface,rotation_axis,axis_position,angle))\n return neighbors", "def layer_sweep(self):\n for fixed_id, fixed_layer in enumerate(self.layers):\n if fixed_id + 1 == len(self.layers):\n break\n moving_layer = self.layers[fixed_id + 1]\n for node in moving_layer.nodes:\n self.find_neighbors(node)\n if len(node.neighbors) > 0:\n self.calculate_barycenter(node)\n else:\n node.barycenter = 0 #1000\n sorted_nodes = sorted(moving_layer.nodes, key=lambda n: n.barycenter, reverse=False)\n for slot, node in enumerate(sorted_nodes):\n node.slot = slot + 1\n barys = set([n.barycenter for n in sorted_nodes])\n bary_nodes = [list(filter(lambda x: x.barycenter == b, sorted_nodes)) for b in barys]\n for b in bary_nodes:\n if len(b) > 1:\n for node in b:\n if len(node.sl_neighbors) == 1:\n n_slot = node.sl_neighbors[0].slot\n if n_slot > node.slot:\n other_node = max(b, key=lambda s: s.slot)\n elif n_slot < node.slot:\n other_node = min(b, key=lambda s: s.slot)\n temp = node.slot\n node.slot = other_node.slot\n other_node.slot = temp\n sorted_nodes = sorted(moving_layer.nodes, key=lambda n: n.slot, reverse=False)\n moving_layer.nodes = sorted_nodes", "def __init__(self, initial_surfaces):\n\n nsurf = len(initial_surfaces)\n nvert, nedge, ngroup, \\\n surf_ptrs, edge_ptrs, \\\n surf_group, edge_group \\\n = self._compute_topology(initial_surfaces)\n\n self._num = {\n 'surf': len(initial_surfaces),\n 'vert': nvert,\n 'edge': nedge,\n 'group': ngroup,\n }\n\n self._topo = {\n 'surf_ptrs': surf_ptrs,\n 'edge_ptrs': edge_ptrs,\n 'surf_group': surf_group,\n 'edge_group': edge_group,\n }\n\n self._mult = {\n 'vert': numpy.zeros(nvert, int),\n 'edge': numpy.zeros(nedge, int),\n 'diff_vert': numpy.zeros(nvert, int),\n 'diff_edge': numpy.zeros(nedge, int),\n }\n\n self._bspline = {\n 'order': 4 * numpy.ones(ngroup, int),\n 'num_cp': 4 * numpy.ones(ngroup, int),\n 'num_pt': 10 * numpy.ones(ngroup, int),\n }\n\n self._surf_indices = {\n 'df': numpy.zeros((nsurf, 2), int, 'F'),\n 'cp': numpy.zeros((nsurf, 2), int, 'F'),\n 'pt': numpy.zeros((nsurf, 2), int, 'F'),\n }\n\n self._edge_indices = {\n 'df': numpy.zeros((nedge, 2), int, 'F'),\n 'cp': numpy.zeros((nedge, 2), int, 'F'),\n 'pt': numpy.zeros((nedge, 2), int, 'F'),\n }\n\n self._str_indices = {\n 'cp': numpy.zeros((nsurf, 2), int, 'F'),\n 'pt': numpy.zeros((nsurf, 2), int, 'F'),\n }\n\n self._vert_indices = numpy.zeros(nvert, int)\n\n self._size = {\n 'df_str': 0,\n 'df': 0,\n 'cp': 0,\n 'cp_str': 0,\n 'pt_str': 0,\n 'pt': 0,\n }\n\n self.diff = {\n 'surf': numpy.zeros((nsurf, 3, 3), bool, 'F'),\n 'edge': numpy.zeros((nedge, 2), bool, 'F'),\n }\n\n self.hidden = numpy.zeros(nsurf, bool)\n\n self.jac = {\n 'd(df)/d(df_str)': None,\n 'd(cp)/d(df)': None,\n 'd(cp_str)/d(cp)': None,\n 'd(pt_str)/d(cp_str)': None,\n 'd(pt)/d(pt_str)': None,\n }\n\n self.vec = {\n 'df_str': None,\n 'df': None,\n 'cp': None,\n 'cp_str': None,\n 'pt_str': None,\n 'pt': None,\n }", "def get_objects(color, depth, threshold1, threshold2):\n\n gray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray, (5, 5), 0)\n surf = cv2.xfeatures2d.SURF_create(500)\n\n # find and draw the keypoints\n kp = surf.detect(blur,None)\n\n pts = [p.pt for p in kp]\n xpts = []\n ypts = []\n\n # evaluate the keypoints and only save the keypoints who are between the given threshold\n depth_values = []\n for i in range(0,len(pts)):\n xco = int(pts[i][0])\n yco = int(pts[i][1])\n depth_value = depth[yco][xco]\n if depth_value >= float(threshold1) and depth_value <= float(threshold2):\n 
xpts.append(xco)\n ypts.append(yco)\n depth_values.append(depth_value)\n\n # make histogram of x coordinates of the saved keypoints\n n, distr, _ = plt.hist(xpts)\n plt.savefig('hist.png')\n\n # evaluate the histogram and make seperate arrays for the different objects\n objectarray = []\n temp = []\n for i in range(len(n)):\n if n[i] > 0:\n temp.append(distr[i])\n temp.append(distr[i+1])\n else:\n if len(temp)!=0:\n objectarray.append(temp)\n temp = []\n objectarray.append(temp)\n\n objects = []\n\n # determine the objects with the previous calculated arrays\n for i in range(len(objectarray)):\n y_values = []\n min_x = int(np.amin(objectarray[i]))\n max_x = int(np.amax(objectarray[i]))\n\n for j in range(len(xpts)):\n if xpts[j] > min_x and xpts[j] < max_x:\n y_values.append(ypts[j])\n\n min_y = int(np.amin(y_values))\n max_y = int(np.amax(y_values))\n x = min_x\n y = min_y\n w = max_x - min_x\n h = max_y - min_y\n\n depth_mean = round(get_depth_mean(depth, x, y, w, h), 3)\n\n object = DetectedObject(x, y, w, h, depth_mean)\n objects.append(object)\n\n return objects", "def apply(self, mode='lateral'):\n num_lat_slices = self.img3d.shape[0]\n num_cor_slices = self.img3d.shape[2]\n bin_mask = np.zeros(self.mask3d.shape)\n x,y,z = np.where(self.mask3d==self.vertebra_id)\n bin_mask[np.min(x):np.max(x), np.min(y):np.max(y), np.min(z):np.max(z)] = 1\n if mode=='lateral' or mode=='fuse':\n mask_lat = np.zeros((6, self.mask3d.shape[0], self.mask3d.shape[1], self.mask3d.shape[2]))\n img_lat = np.zeros(self.img3d.shape)\n binary_lat = np.zeros(self.mask3d.shape)\n # for each lateral slice\n for idx in range(num_lat_slices):\n img_slice, mask_slice = np.copy(self.img3d[idx, :, :]), np.copy(self.mask3d[idx, :, :])\n xloc, yloc = np.where(mask_slice==self.vertebra_id)\n # check if vertebra is present in image\n if xloc.shape[0]==0:\n # if not keep mask as it is\n mask_lat[:,idx, :, :] = self.get_one_hot(mask_slice)\n img_lat[idx, :, :] = img_slice\n else:\n min_x, max_x = np.min(xloc), np.max(xloc)\n min_y, max_y = np.min(yloc), np.max(yloc)\n inpainted_img, inpainted_mask, binary_mask = self.inpaint(img_slice, mask_slice, min_x, max_x, min_y, max_y)\n mask_lat[:,idx, :, :] = inpainted_mask\n img_lat[idx,:, :] = inpainted_img\n binary_lat[idx,:,:] = binary_mask\n\n\n if mode=='coronal' or mode=='fuse':\n mask_cor = np.zeros((6, self.mask3d.shape[0], self.mask3d.shape[1], self.mask3d.shape[2]))\n img_cor = np.zeros(self.img3d.shape)\n binary_cor = np.zeros(self.mask3d.shape)\n # for each coronal slice\n for idx in range(num_cor_slices):\n img_slice, mask_slice = np.copy(self.img3d[:, :, idx]), np.copy(self.mask3d[:, :, idx])\n xloc, yloc = np.where(mask_slice==self.vertebra_id)\n # check if vertebra is present in image\n if xloc.shape[0]==0:\n # if not keep mask as it is\n mask_cor[:, :, :, idx] = self.get_one_hot(mask_slice)\n img_cor[:, :, idx] = img_slice\n else:\n min_x, max_x = np.min(xloc), np.max(xloc)\n min_y, max_y = np.min(yloc), np.max(yloc)\n # else remove fractured vertebra and inpaint\n inpainted_img, inpainted_mask, binary_mask = self.inpaint(img_slice, mask_slice, min_x, max_x, min_y, max_y, 'coronal')\n mask_cor[:, :, :, idx] = inpainted_mask\n img_cor[:, :, idx] = inpainted_img\n binary_cor[:,:,idx] = binary_mask\n \n # return to a one channel mask and convert labels back\n if mode=='lateral':\n mask_lat = np.argmax(mask_lat, axis=0)\n mask_lat = self.map_class_to_vert(mask_lat)\n self.mask3d = mask_lat\n self.img3d = img_lat\n elif mode=='coronal':\n mask_cor = np.argmax(mask_cor, 
axis=0)\n mask_cor = self.map_class_to_vert(mask_cor)\n self.mask3d = mask_cor\n self.img3d = img_cor\n elif mode=='fuse':\n mask_fuse = mask_cor*0.5+mask_lat*0.5\n mask_fuse = np.argmax(mask_fuse, axis=0)\n mask_fuse = self.map_class_to_vert(mask_fuse)\n self.mask3d = mask_fuse\n self.img3d = (img_lat+img_cor)/2\n \n # save result\n self.mask3d = self.mask3d.astype(np.uint8)\n self.img3d = self.img3d.astype(np.float32)\n \n # put back if we padded and cropped\n if self.padz and self.padx:\n self.orig_img3d[:,self.ymin:self.ymax, :] = self.img3d[self.xcrop1:-self.xcrop2,:,self.zcrop1:-self.zcrop2]\n self.orig_mask3d[:,self.ymin:self.ymax, :] = self.mask3d[self.xcrop1:-self.xcrop2,:,self.zcrop1:-self.zcrop2]\n elif self.padz and not self.padx:\n self.orig_img3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, :] = self.img3d[:,:,self.zcrop1:-self.zcrop2]\n self.orig_mask3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, :] = self.mask3d[:,:,self.zcrop1:-self.zcrop2]\n elif not self.padz and self.padx:\n self.orig_img3d[:,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = self.img3d[self.xcrop1:-self.xcrop2,:,:]\n self.orig_mask3d[:,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = self.mask3d[self.xcrop1:-self.xcrop2,:,:]\n else:\n self.orig_img3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = self.img3d\n self.orig_mask3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = self.mask3d\n \n img = return_scan_to_orig(self.orig_img3d, self.mask_affine, self.mask_header, self.zooms)\n nib.save(img, self.inpainted_img_path)\n\n mask_fuse = return_scan_to_orig(self.orig_mask3d, self.mask_affine, self.mask_header, self.zooms, np.uint8)\n nib.save(mask_fuse, self.inpainted_mask_path)\n print('Inpaint mask and image saved at: ', self.inpainted_mask_path, self.inpainted_img_path)", "def build_halo_mask(fixed_depth=30, margin=21, min_fragment=10):\n assert margin % 2 is not 0, \"Margin should be odd\"\n\n rr, cc = circle(margin / 2, margin / 2, margin / 2 + 1, shape=(margin, margin))\n structure_element = numpy.zeros((margin, margin))\n structure_element[rr, cc] = 1\n structure_element = numpy.repeat(numpy.expand_dims(numpy.expand_dims(structure_element, 0), 0), fixed_depth, 0)\n\n sel = torch.from_numpy(structure_element).float().to(device)\n\n def f(label):\n \"\"\"\n \n :param label: batch of instance levels each instance must have unique id\n :return: labels, masks and object_lists used by halo loss\n \"\"\"\n back = numpy.zeros((label.shape[0], fixed_depth, label.shape[1], label.shape[2]))\n object_list = []\n for i in range(label.shape[0]):\n bincount = numpy.bincount(label[i].flatten())\n pixels = numpy.where(bincount > min_fragment)[0]\n if len(pixels) > fixed_depth:\n pixels = pixels[:fixed_depth]\n warnings.warn(\"Not all objects fits in fixed depth\", RuntimeWarning)\n\n for l, v in enumerate(pixels):\n back[i, l, label[i] == v] = 1.\n object_list.append(numpy.array(range(l + 1)))\n\n labels = torch.from_numpy(back).float().to(device)\n masks = F.conv2d(labels, sel, groups=fixed_depth, padding=margin / 2)\n \n masks[masks > 0] = 1.\n masks[labels > 0] = 2.\n masks[:, 0, :, :] = 1.\n \n weights=masks.sum(-1,keepdim=True).sum(-2,keepdim=True)\n weights[weights==0.]=1.\n \n masks = masks/weights\n \n return labels, masks, object_list\n\n return f", "def _map_elements3(self, nid_map, model, unused_j, unused_dim_max,\n nid_cp_cd, xref_loads=True):\n settings = self.gui.settings # type: Settings\n\n # these normals point inwards\n # 4\n # / | \\\n # / | \\\n # 
3-------2\n # \\ | /\n # \\ | /\n # 1\n _ctetra_faces = (\n (0, 1, 2), # (1, 2, 3),\n (0, 3, 1), # (1, 4, 2),\n (0, 3, 2), # (1, 3, 4),\n (1, 3, 2), # (2, 4, 3),\n )\n\n # these normals point inwards\n #\n #\n #\n #\n # /4-----3\n # / /\n # / 5 /\n # / \\ /\n # / \\ /\n # 1---------2\n _cpyram_faces = (\n (0, 1, 2, 3), # (1, 2, 3, 4),\n (1, 4, 2), # (2, 5, 3),\n (2, 4, 3), # (3, 5, 4),\n (0, 3, 4), # (1, 4, 5),\n (0, 4, 1), # (1, 5, 2),\n )\n\n # these normals point inwards\n # /6\n # / | \\\n # / | \\\n # 3\\ | \\\n # | \\ /4-----5\n # | \\/ /\n # | / \\ /\n # | / \\ /\n # | / \\ /\n # 1---------2\n _cpenta_faces = (\n (0, 2, 1), # (1, 3, 2),\n (3, 4, 5), # (4, 5, 6),\n\n (0, 1, 4, 3), # (1, 2, 5, 4), # bottom\n (1, 2, 5, 4), # (2, 3, 6, 5), # right\n (0, 3, 5, 2), # (1, 4, 6, 3), # left\n )\n\n # these normals point inwards\n # 8----7\n # /| /|\n # / | / |\n # / 5-/--6\n # 4-----3 /\n # | / | /\n # | / | /\n # 1-----2\n _chexa_faces = (\n (4, 5, 6, 7), # (5, 6, 7, 8),\n (0, 3, 2, 1), # (1, 4, 3, 2),\n (1, 2, 6, 5), # (2, 3, 7, 6),\n (2, 3, 7, 6), # (3, 4, 8, 7),\n (0, 4, 7, 3), # (1, 5, 8, 4),\n (0, 6, 5, 4), # (1, 7, 6, 5),\n )\n\n elements, nelements, unused_superelements = get_elements_nelements_unvectorized(model)\n xyz_cid0 = self.xyz_cid0\n pids_array = np.zeros(nelements, dtype='int32')\n eids_array = np.zeros(nelements, dtype='int32')\n mcid_array = np.full(nelements, -1, dtype='int32')\n material_theta_array = np.full(nelements, np.nan, dtype='float32')\n dim_array = np.full(nelements, -1, dtype='int32')\n nnodes_array = np.full(nelements, -1, dtype='int32')\n\n # quality\n min_interior_angle = np.zeros(nelements, 'float32')\n max_interior_angle = np.zeros(nelements, 'float32')\n dideal_theta = np.zeros(nelements, 'float32')\n max_skew_angle = np.zeros(nelements, 'float32')\n max_warp_angle = np.zeros(nelements, 'float32')\n max_aspect_ratio = np.zeros(nelements, 'float32')\n area = np.zeros(nelements, 'float32')\n area_ratio = np.zeros(nelements, 'float32')\n taper_ratio = np.zeros(nelements, 'float32')\n min_edge_length = np.zeros(nelements, 'float32')\n normals = np.full((nelements, 3), np.nan, 'float32')\n\n nids_list = []\n ieid = 0\n cell_offset = 0\n\n dtype = get_numpy_idtype_for_vtk()\n\n cell_types_array = np.zeros(nelements, dtype=dtype)\n cell_offsets_array = np.zeros(nelements, dtype=dtype)\n\n cell_type_point = 1 # vtk.vtkVertex().GetCellType()\n cell_type_line = 3 # vtk.vtkLine().GetCellType()\n cell_type_tri3 = 5 # vtkTriangle().GetCellType()\n cell_type_tri6 = 22 # vtkQuadraticTriangle().GetCellType()\n cell_type_quad4 = 9 # vtkQuad().GetCellType()\n #cell_type_quad8 = 23 # vtkQuadraticQuad().GetCellType()\n cell_type_tetra4 = 10 # vtkTetra().GetCellType()\n cell_type_tetra10 = 24 # vtkQuadraticTetra().GetCellType()\n cell_type_pyram5 = 14 # vtkPyramid().GetCellType()\n #cell_type_pyram13 = 27 # vtk.vtkQuadraticPyramid().GetCellType()\n cell_type_penta6 = 13 # vtkWedge().GetCellType()\n cell_type_penta15 = 26 # vtkQuadraticWedge().GetCellType()\n cell_type_hexa8 = 12 # vtkHexahedron().GetCellType()\n cell_type_hexa20 = 25 # vtkQuadraticHexahedron().GetCellType()\n\n # per gui/testing_methods.py/create_vtk_cells_of_constant_element_type\n #1 = vtk.vtkVertex().GetCellType()\n #3 = vtkLine().GetCellType()\n #5 = vtkTriangle().GetCellType()\n #9 = vtk.vtkQuad().GetCellType()\n #10 = vtkTetra().GetCellType()\n #vtkPenta().GetCellType()\n #vtkHexa().GetCellType()\n #vtkPyram().GetCellType()\n\n skipped_etypes = set()\n all_nids = nid_cp_cd[:, 0]\n ieid = 0\n for eid, 
elem in sorted(elements.items()):\n if ieid % 5000 == 0 and ieid > 0:\n print(' map_elements = %i' % ieid)\n etype = elem.type\n nnodes = None\n nids = None\n pid = None\n cell_type = None\n inids = None\n\n dideal_thetai = np.nan\n min_thetai = np.nan\n max_thetai = np.nan\n #max_thetai = np.nan\n max_skew = np.nan\n max_warp = np.nan\n aspect_ratio = np.nan\n areai = np.nan\n area_ratioi = np.nan\n taper_ratioi = np.nan\n min_edge_lengthi = np.nan\n normali = np.nan\n if etype in ['CTRIA3', 'CTRIAR', 'CTRAX3', 'CPLSTN3', 'CPLSTS3']:\n nids = elem.nodes\n pid = elem.pid\n cell_type = cell_type_tri3 # 5\n inids = np.searchsorted(all_nids, nids)\n p1, p2, p3 = xyz_cid0[inids, :]\n out = tri_quality(p1, p2, p3)\n (areai, max_skew, aspect_ratio,\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi) = out\n normali = np.cross(p1 - p2, p1 - p3)\n if isinstance(elem.theta_mcid, float):\n material_theta_array[ieid] = elem.theta_mcid\n else:\n mcid_array[ieid] = elem.theta_mcid\n nnodes = 3\n dim = 2\n\n elif etype in {'CQUAD4', 'CQUADR', 'CPLSTN4', 'CPLSTS4', 'CQUADX4',\n 'CQUAD1'}: # nastran95\n nids = elem.nodes\n pid = elem.pid\n cell_type = cell_type_quad4 #9\n inids = np.searchsorted(all_nids, nids)\n p1, p2, p3, p4 = xyz_cid0[inids, :]\n out = quad_quality(elem, p1, p2, p3, p4)\n (areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, max_warp) = out\n normali = np.cross(p1 - p3, p2 - p4)\n if isinstance(elem.theta_mcid, float):\n material_theta_array[ieid] = elem.theta_mcid\n else:\n mcid_array[ieid] = elem.theta_mcid\n nnodes = 4\n dim = 2\n\n elif etype in ['CTRIA6']:\n nids = elem.nodes\n pid = elem.pid\n if None in nids:\n cell_type = cell_type_tri3\n inids = np.searchsorted(all_nids, nids[:3])\n nids = nids[:3]\n p1, p2, p3 = xyz_cid0[inids, :]\n nnodes = 3\n else:\n cell_type = cell_type_tri6\n inids = np.searchsorted(all_nids, nids)\n p1, p2, p3, p4, unused_p5, unused_p6 = xyz_cid0[inids, :]\n nnodes = 6\n out = tri_quality(p1, p2, p3)\n (areai, max_skew, aspect_ratio,\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi) = out\n normali = np.cross(p1 - p2, p1 - p3)\n if isinstance(elem.theta_mcid, float):\n material_theta_array[ieid] = elem.theta_mcid\n else:\n mcid_array[ieid] = elem.theta_mcid\n dim = 2\n elif etype == 'CQUAD8':\n nids = elem.nodes\n pid = elem.pid\n if None in nids:\n cell_type = cell_type_tri3\n inids = np.searchsorted(all_nids, nids[:4])\n nids = nids[:4]\n p1, p2, p3, p4 = xyz_cid0[inids, :]\n nnodes = 4\n else:\n cell_type = cell_type_tri6\n inids = np.searchsorted(all_nids, nids)\n p1, p2, p3, p4 = xyz_cid0[inids[:4], :]\n nnodes = 8\n out = quad_quality(elem, p1, p2, p3, p4)\n (areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, max_warp) = out\n normali = np.cross(p1 - p3, p2 - p4)\n if isinstance(elem.theta_mcid, float):\n material_theta_array[ieid] = elem.theta_mcid\n else:\n mcid_array[ieid] = elem.theta_mcid\n nnodes = 4\n dim = 2\n\n elif etype == 'CSHEAR':\n nids = elem.nodes\n pid = elem.pid\n cell_type = cell_type_quad4 #9\n inids = np.searchsorted(all_nids, nids)\n p1, p2, p3, p4 = xyz_cid0[inids, :]\n out = quad_quality(elem, p1, p2, p3, p4)\n (areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, max_warp) = out\n normali = np.cross(p1 - p3, p2 - p4)\n nnodes = 4\n dim = 2\n\n elif etype == 'CTETRA':\n nids = elem.nodes\n pid = elem.pid\n if None 
in nids:\n cell_type = cell_type_tetra4\n nids = nids[:4]\n nnodes = 4\n else:\n cell_type = cell_type_tetra10\n nnodes = 10\n inids = np.searchsorted(all_nids, nids)\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(\n _ctetra_faces, nids, nid_map, xyz_cid0)\n dim = 3\n\n elif etype == 'CHEXA':\n nids = elem.nodes\n pid = elem.pid\n if None in nids:\n cell_type = cell_type_hexa8\n nids = nids[:8]\n nnodes = 8\n else:\n cell_type = cell_type_hexa20\n nnodes = 20\n inids = np.searchsorted(all_nids, nids)\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(\n _chexa_faces, nids, nid_map, xyz_cid0)\n dim = 3\n\n elif etype == 'CPENTA':\n nids = elem.nodes\n pid = elem.pid\n\n if None in nids:\n cell_type = cell_type_penta6\n nids = nids[:6]\n nnodes = 6\n else:\n cell_type = cell_type_penta15\n nnodes = 15\n\n inids = np.searchsorted(all_nids, nids)\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(\n _cpenta_faces, nids, nid_map, xyz_cid0)\n dim = 3\n elif etype == 'CPYRAM':\n # TODO: assuming 5\n nids = elem.nodes\n pid = elem.pid\n if None in nids:\n cell_type = cell_type_pyram5\n nids = nids[:5]\n nnodes = 5\n else:\n cell_type = cell_type_penta15\n nnodes = 15\n inids = np.searchsorted(all_nids, nids)\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(\n _cpyram_faces, nids, nid_map, xyz_cid0)\n dim = 3\n elif etype in ['CELAS2', 'CELAS4', 'CDAMP4']:\n # these can have empty nodes and have no property\n # CELAS1: 1/2 GRID/SPOINT and pid\n # CELAS2: 1/2 GRID/SPOINT, k, ge, and s\n # CELAS3: 1/2 SPOINT and pid\n # CELAS4: 1/2 SPOINT and k\n nids = elem.nodes\n assert nids[0] != nids[1]\n if None in nids:\n assert nids[0] is not None, nids\n assert nids[1] is None, nids\n nids = [nids[0]]\n cell_type = cell_type_point\n nnodes = 1\n else:\n nids = elem.nodes\n assert nids[0] != nids[1]\n cell_type = cell_type_line\n nnodes = 2\n inids = np.searchsorted(all_nids, nids)\n pid = 0\n dim = 0\n elif etype in ['CBUSH', 'CBUSH1D', 'CBUSH2D',\n 'CELAS1', 'CELAS3',\n 'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP5',\n 'CFAST', 'CGAP', 'CVISC']:\n nids = elem.nodes\n assert nids[0] != nids[1]\n assert None not in nids, 'nids=%s\\n%s' % (nids, elem)\n pid = elem.pid\n cell_type = cell_type_line\n inids = np.searchsorted(all_nids, nids)\n nnodes = 2\n dim = 0\n elif etype in ['CBAR', 'CBEAM']:\n nids = elem.nodes\n pid = elem.pid\n pid_ref = model.Property(pid)\n areai = pid_ref.Area()\n cell_type = cell_type_line\n inids = np.searchsorted(all_nids, nids)\n p1, p2 = xyz_cid0[inids, :]\n min_edge_lengthi = norm(p2 - p1)\n nnodes = 2\n dim = 1\n elif etype in ['CROD', 'CTUBE']:\n nids = elem.nodes\n pid = elem.pid\n pid_ref = model.Property(pid)\n areai = pid_ref.Area()\n cell_type = cell_type_line\n inids = np.searchsorted(all_nids, nids)\n p1, p2 = xyz_cid0[inids, :]\n min_edge_lengthi = norm(p2 - p1)\n nnodes = 2\n dim = 1\n elif etype == 'CONROD':\n nids = elem.nodes\n areai = elem.Area()\n pid = 0\n cell_type = cell_type_line\n inids = np.searchsorted(all_nids, nids)\n p1, p2 = xyz_cid0[inids, :]\n min_edge_lengthi = norm(p2 - p1)\n nnodes = 2\n dim = 1\n #------------------------------\n # rare\n #elif etype == 'CIHEX1':\n #nids = elem.nodes\n #pid = elem.pid\n #cell_type = cell_type_hexa8\n #inids = np.searchsorted(all_nids, nids)\n #min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(\n #_chexa_faces, nids, nid_map, xyz_cid0)\n #nnodes = 8\n #dim = 3\n elif etype == 
'CHBDYE':\n #self.eid_map[eid] = ieid\n eid_solid = elem.eid2\n side = elem.side\n element_solid = model.elements[eid_solid]\n\n mapped_inids = SIDE_MAP[element_solid.type][side]\n side_inids = [nid - 1 for nid in mapped_inids]\n nodes = element_solid.node_ids\n\n pid = 0\n nnodes = len(side_inids)\n nids = [nodes[inid] for inid in side_inids]\n inids = np.searchsorted(all_nids, nids)\n\n if len(side_inids) == 4:\n cell_type = cell_type_quad4\n else:\n msg = 'element_solid:\\n%s' % (str(element_solid))\n msg += 'mapped_inids = %s\\n' % mapped_inids\n msg += 'side_inids = %s\\n' % side_inids\n msg += 'nodes = %s\\n' % nodes\n #msg += 'side_nodes = %s\\n' % side_nodes\n raise NotImplementedError(msg)\n elif etype == 'GENEL':\n nids = []\n if len(elem.ul_nodes):\n nids.append(elem.ul_nodes)\n if len(elem.ud_nodes):\n nids.append(elem.ud_nodes)\n nids = np.unique(np.hstack(nids))\n #print(elem.get_stats())\n nids = nids[:2]\n\n areai = np.nan\n pid = 0\n cell_type = cell_type_line\n inids = np.searchsorted(all_nids, nids)\n p1, p2 = xyz_cid0[inids, :]\n min_edge_lengthi = norm(p2 - p1)\n nnodes = len(nids)\n dim = 1\n else:\n #raise NotImplementedError(elem)\n skipped_etypes.add(etype)\n nelements -= 1\n continue\n #for nid in nids:\n #assert isinstance(nid, integer_types), 'not an integer. nids=%s\\n%s' % (nids, elem)\n #assert nid != 0, 'not a positive integer. nids=%s\\n%s' % (nids, elem)\n\n assert inids is not None\n if not np.array_equal(all_nids[inids], nids):\n msg = 'all_nids[inids]=%s nids=%s\\n%s' % (all_nids[inids], nids, elem)\n raise RuntimeError(msg)\n\n assert cell_type is not None\n assert cell_offset is not None\n assert eid is not None\n assert pid is not None\n assert dim is not None\n assert nnodes is not None\n nids_list.append(nnodes)\n nids_list.extend(inids)\n normals[ieid] = normali\n eids_array[ieid] = eid\n pids_array[ieid] = pid\n dim_array[ieid] = dim\n cell_types_array[ieid] = cell_type\n cell_offsets_array[ieid] = cell_offset # I assume the problem is here\n cell_offset += nnodes + 1\n self.eid_map[eid] = ieid\n\n min_interior_angle[ieid] = min_thetai\n max_interior_angle[ieid] = max_thetai\n dideal_theta[ieid] = dideal_thetai\n max_skew_angle[ieid] = max_skew\n max_warp_angle[ieid] = max_warp\n max_aspect_ratio[ieid] = aspect_ratio\n area[ieid] = areai\n area_ratio[ieid] = area_ratioi\n taper_ratio[ieid] = taper_ratioi\n min_edge_length[ieid] = min_edge_lengthi\n ieid += 1\n\n #print('self.eid_map =', self.eid_map)\n\n icells_zero = np.where(cell_types_array == 0)[0]\n # TODO: I'd like to get rid of deep=1, but it'll crash the edges\n deep = 1\n if len(icells_zero):\n icells = np.where(cell_types_array != 0)[0]\n if len(icells) == 0:\n self.log.error('skipped_etypes = %s' % skipped_etypes)\n raise RuntimeError('there are no elements...')\n eids_array = eids_array[icells]\n pids_array = pids_array[icells]\n #dim_array = pids_array[dim_array]\n cell_types_array = cell_types_array[icells]\n cell_offsets_array = cell_offsets_array[icells]\n nnodes_array = nnodes_array[icells]\n normals = normals[icells, :]\n #deep = 1\n #print('deep = %s' % deep)\n if skipped_etypes:\n self.log.error('skipped_etypes = %s' % list(skipped_etypes))\n #print('skipped_etypes = %s' % skipped_etypes)\n if len(pids_array) != nelements:\n msg = 'nelements=%s len(pids_array)=%s' % (nelements, len(pids_array))\n raise RuntimeError(msg)\n if len(cell_offsets_array) != nelements:\n msg = 'nelements=%s len(cell_offsets_array)=%s' % (nelements, len(cell_offsets_array))\n raise 
RuntimeError(msg)\n\n nids_array = np.array(nids_list, dtype=dtype)\n\n #-----------------------------------------------------------------\n # saving some data members\n self.element_ids = eids_array\n\n #print('cell_types_array* = ', cell_types_array.tolist())\n #print('cell_offsets_array* = ', cell_offsets_array.tolist())\n\n #-----------------------------------------------------------------\n # build the grid\n\n #self.log.info('nids_array = %s' % nids_array)\n #self.log.info('cell_offsets_array = %s' % cell_offsets_array)\n #self.log.info('cell_types_array = %s' % cell_types_array)\n\n # Create the array of cells\n cells_id_type = numpy_to_vtkIdTypeArray(nids_array, deep=1)\n vtk_cells = vtk.vtkCellArray()\n vtk_cells.SetCells(nelements, cells_id_type)\n\n # Cell types\n vtk_cell_types = numpy_to_vtk(\n cell_types_array, deep=deep,\n array_type=vtk.vtkUnsignedCharArray().GetDataType())\n\n vtk_cell_offsets = numpy_to_vtk(cell_offsets_array, deep=deep,\n array_type=vtk.VTK_ID_TYPE)\n\n grid = self.grid\n #grid = vtk.vtkUnstructuredGrid()\n grid.SetCells(vtk_cell_types, vtk_cell_offsets, vtk_cells)\n\n #-----------------------------------------------------------------\n # fill the results\n nid_to_pid_map = None\n self.isubcase_name_map = {1: ['Nastran', '']}\n icase = 0\n cases = OrderedDict()\n form = ['Geometry', None, []]\n form0 = form[2]\n\n subcase_id = 0\n\n #nids_set = True\n #if nids_set:\n # this intentionally makes a deepcopy\n #nids = np.array(nid_cp_cd[:, 0])\n\n # this intentionally makes a deepcopy\n cds = np.array(nid_cp_cd[:, 2])\n colormap = settings.colormap\n nid_res = GuiResult(subcase_id, 'NodeID', 'NodeID', 'node', all_nids,\n mask_value=0,\n nlabels=None,\n labelsize=None,\n ncolors=None,\n colormap=colormap,\n data_format=None,\n uname='GuiResult')\n cases[icase] = (nid_res, (0, 'Node ID'))\n form0.append(('Node ID', icase, []))\n icase += 1\n\n if cds.max() > 0:\n cd_res = GuiResult(0, header='NodeCd', title='NodeCd',\n location='node', scalar=cds)\n cases[icase] = (cd_res, (0, 'NodeCd'))\n form0.append(('NodeCd', icase, []))\n icase += 1\n\n eid_res = GuiResult(subcase_id, 'ElementID', 'ElementID', 'centroid', eids_array,\n mask_value=0,\n nlabels=None,\n labelsize=None,\n ncolors=None,\n colormap=colormap,\n data_format=None,\n uname='GuiResult')\n cases[icase] = (eid_res, (0, 'ElementID'))\n form0.append(('ElementID', icase, []))\n icase += 1\n\n is_element_dim = True\n #if len(np.unique(dim_array)) > 1:\n #dim_res = GuiResult(subcase_id, 'ElementDim', 'ElementDim', 'centroid', dim_array,\n #mask_value=-1,\n #nlabels=None,\n #labelsize=None,\n #ncolors=None,\n #colormap=colormap,\n #data_format=None,\n #uname='GuiResult')\n #cases[icase] = (dim_res, (0, 'ElementDim'))\n #form0.append(('ElementDim', icase, []))\n #icase += 1\n\n if nnodes_array.max() > -1:\n nnodes_res = GuiResult(subcase_id, 'NNodes/Elem', 'NNodes/Elem',\n 'centroid', nnodes_array,\n mask_value=0,\n nlabels=None,\n labelsize=None,\n ncolors=None,\n colormap=colormap,\n data_format=None,\n uname='GuiResult')\n cases[icase] = (nnodes_res, (0, 'NNodes/Elem'))\n form0.append(('NNodes/Elem', icase, []))\n icase += 1\n\n #pid_res = GuiResult(subcase_id, 'PropertyID', 'PropertyID', 'centroid', pids_array,\n #mask_value=0,\n #nlabels=None,\n #labelsize=None,\n #ncolors=None,\n #colormap=colormap,\n #data_format=None,\n #uname='GuiResult')\n #cases[icase] = (pid_res, (0, 'PropertyID'))\n #form0.append(('PropertyID', icase, []))\n #icase += 1\n\n if len(model.properties) and nelements and 
settings.nastran_is_properties:\n icase, upids, pcomp, pshell, is_pshell_pcomp = self._build_properties(\n model, nelements, eids_array, pids_array, cases, form0, icase)\n icase = _build_materials(model, pcomp, pshell, is_pshell_pcomp,\n cases, form0, icase)\n try:\n icase = _build_optimization(model, pids_array, upids,\n nelements, cases, form0, icase)\n except Exception:\n #raise\n s = StringIO()\n traceback.print_exc(file=s)\n sout = s.getvalue()\n self.gui.log_error(sout)\n print(sout)\n\n #if isgreater_int(mcid_array, -1):\n #mcid_res = GuiResult(subcase_id, 'Material Coordinate System', 'MaterialCoord',\n #'centroid', mcid_array,\n #mask_value=-1,\n #nlabels=None,\n #labelsize=None,\n #ncolors=None,\n #colormap=colormap,\n #data_format=None,\n #uname='GuiResult')\n #cases[icase] = (mcid_res, (0, 'Material Coordinate System'))\n #form0.append(('Material Coordinate System', icase, []))\n #icase += 1\n\n #if np.isfinite(theta_array).any():\n #print('np.nanmax(theta_array) =', np.nanmax(theta_array))\n #theta_res = GuiResult(subcase_id, 'Theta', 'Theta', 'centroid', theta_array,\n #mask_value=None,\n #nlabels=None,\n #labelsize=None,\n #ncolors=None,\n #colormap=colormap,\n #data_format=None,\n #uname='GuiResult')\n #cases[icase] = (theta_res, (0, 'Theta'))\n #form0.append(('Theta', icase, []))\n #icase += 1\n\n normal_mag = underflow_norm(normals, axis=1)\n assert len(normal_mag) == nelements\n normals /= normal_mag.reshape(nelements, 1)\n i_not_nan = np.isnan(normal_mag)\n\n #if self.make_offset_normals_dim and nelements:\n #material_coord = None\n #icase, normals = _build_normals_quality(\n #model, self.gui.eid_map, nelements, cases, form0, icase,\n #xyz_cid0, material_coord, material_theta,\n #min_interior_angle, max_interior_angle, dideal_theta,\n #area, max_skew_angle, taper_ratio,\n #max_warp_angle, area_ratio, min_edge_length, max_aspect_ratio,\n #make_offset_normals_dim=self.make_offset_normals_dim)\n #self.normals = normals\n\n #----------------------------------------------------------\n\n is_shell = False\n if False in i_not_nan:\n #max_normal = np.nanmax(normal_mag[i_not_nan])\n #is_shell = np.abs(max_normal) > 0.\n is_shell = True\n is_solid = isfinite_and_nonzero(max_interior_angle)\n #print('is_shell=%s is_solid=%s' % (is_shell, is_solid))\n if is_shell:\n nx_res = GuiResult(\n 0, header='NormalX', title='NormalX',\n location='centroid', scalar=normals[:, 0], data_format='%.2f')\n ny_res = GuiResult(\n 0, header='NormalY', title='NormalY',\n location='centroid', scalar=normals[:, 1], data_format='%.2f')\n nz_res = GuiResult(\n 0, header='NormalZ', title='NormalZ',\n location='centroid', scalar=normals[:, 2], data_format='%.2f')\n nxyz_res = NormalResult(0, 'Normals', 'Normals',\n nlabels=2, labelsize=5, ncolors=2,\n colormap=colormap, data_format='%.1f',\n uname='NormalResult')\n\n\n area_res = GuiResult(0, header='Area', title='Area',\n location='centroid', scalar=area)\n min_edge_length_res = GuiResult(\n 0, header='Min Edge Length', title='Min Edge Length',\n location='centroid', scalar=min_edge_length)\n\n min_theta_res = GuiResult(\n 0, header='Min Interior Angle', title='Min Interior Angle',\n location='centroid', scalar=np.degrees(min_interior_angle))\n max_theta_res = GuiResult(\n 0, header='Max Interior Angle', title='Max Interior Angle',\n location='centroid', scalar=np.degrees(max_interior_angle))\n dideal_theta_res = GuiResult(\n 0, header='Delta Ideal Angle', title='Delta Ideal Angle',\n location='centroid', scalar=np.degrees(dideal_theta))\n\n skew = 
np.degrees(max_skew_angle)\n skew_res = GuiResult(\n 0, header='Max Skew Angle', title='MaxSkewAngle',\n location='centroid', scalar=skew)\n aspect_res = GuiResult(\n 0, header='Aspect Ratio', title='AspectRatio',\n location='centroid', scalar=max_aspect_ratio)\n\n form_checks = []\n form0.append(('Element Checks', None, form_checks))\n if is_element_dim:\n form_checks.append(('ElementDim', icase, []))\n\n if self.make_offset_normals_dim and self.make_nnodes_result and 0: # pragma: no cover\n nnodes_res = GuiResult(\n 0, header='NNodes/Elem', title='NNodes/Elem',\n location='centroid', scalar=nnodes_array)\n form_checks.append(('NNodes', icase + 1, []))\n cases[icase + 1] = (nnodes_res, (0, 'NNodes'))\n icase += 1\n\n if self.make_offset_normals_dim or 1:\n cases[icase + 1] = (nx_res, (0, 'NormalX'))\n cases[icase + 2] = (ny_res, (0, 'NormalY'))\n cases[icase + 3] = (nz_res, (0, 'NormalZ'))\n cases[icase + 4] = (nxyz_res, (0, 'Normal'))\n\n form_checks.append(('NormalX', icase + 1, []))\n form_checks.append(('NormalY', icase + 2, []))\n form_checks.append(('NormalZ', icase + 3, []))\n form_checks.append(('Normal', icase + 4, []))\n\n cases[icase + 5] = (area_res, (0, 'Area'))\n cases[icase + 6] = (min_edge_length_res, (0, 'Min Edge Length'))\n cases[icase + 7] = (min_theta_res, (0, 'Min Interior Angle'))\n cases[icase + 8] = (max_theta_res, (0, 'Max Interior Angle'))\n cases[icase + 9] = (dideal_theta_res, (0, 'Delta Ideal Angle'))\n cases[icase + 10] = (skew_res, (0, 'Max Skew Angle'))\n cases[icase + 11] = (aspect_res, (0, 'Aspect Ratio'))\n\n form_checks.append(('Area', icase + 5, []))\n form_checks.append(('Min Edge Length', icase + 6, []))\n form_checks.append(('Min Interior Angle', icase + 7, []))\n form_checks.append(('Max Interior Angle', icase + 8, []))\n form_checks.append(('Delta Ideal Angle', icase + 9, []))\n form_checks.append(('Max Skew Angle', icase + 10, []))\n form_checks.append(('Aspect Ratio', icase + 11, []))\n icase += 12\n\n if np.any(np.isfinite(area_ratio)) and np.nanmax(area_ratio) > 1.:\n arearatio_res = GuiResult(\n 0, header='Area Ratio', title='Area Ratio',\n location='centroid', scalar=area_ratio)\n cases[icase] = (arearatio_res, (0, 'Area Ratio'))\n form_checks.append(('Area Ratio', icase, []))\n icase += 1\n\n if np.any(np.isfinite(taper_ratio)) and np.nanmax(taper_ratio) > 1.:\n taperratio_res = GuiResult(\n 0, header='Taper Ratio', title='Taper Ratio',\n location='centroid', scalar=taper_ratio)\n cases[icase] = (taperratio_res, (0, 'Taper Ratio'))\n form_checks.append(('Taper Ratio', icase, []))\n icase += 1\n\n if isfinite_and_nonzero(max_warp_angle):\n warp_res = GuiResult(\n 0, header='Max Warp Angle', title='MaxWarpAngle',\n location='centroid', scalar=np.degrees(max_warp_angle))\n cases[icase + 4] = (warp_res, (0, 'Max Warp Angle'))\n form_checks.append(('Max Warp Angle', icase, []))\n icase += 1\n\n #if (np.abs(xoffset).max() > 0.0 or np.abs(yoffset).max() > 0.0 or\n #np.abs(zoffset).max() > 0.0):\n # offsets\n #offset_res = GuiResult(\n #0, header='Offset', title='Offset',\n #location='centroid', scalar=offset, data_format='%g')\n #offset_x_res = GuiResult(\n #0, header='OffsetX', title='OffsetX',\n #location='centroid', scalar=xoffset, data_format='%g')\n #offset_y_res = GuiResult(\n #0, header='OffsetY', title='OffsetY',\n #location='centroid', scalar=yoffset, data_format='%g')\n #offset_z_res = GuiResult(\n #0, header='OffsetZ', title='OffsetZ',\n #location='centroid', scalar=zoffset, data_format='%g')\n\n #cases[icase] = (offset_res, (0, 
'Offset'))\n #cases[icase + 1] = (offset_x_res, (0, 'OffsetX'))\n #cases[icase + 2] = (offset_y_res, (0, 'OffsetY'))\n #cases[icase + 3] = (offset_z_res, (0, 'OffsetZ'))\n\n #form_checks.append(('Offset', icase, []))\n #form_checks.append(('OffsetX', icase + 1, []))\n #form_checks.append(('OffsetY', icase + 2, []))\n #form_checks.append(('OffsetZ', icase + 3, []))\n #icase += 4\n\n if self.make_xyz or IS_TESTING:\n x_res = GuiResult(\n 0, header='X', title='X',\n location='node', scalar=xyz_cid0[:, 0], data_format='%g')\n y_res = GuiResult(\n 0, header='Y', title='Y',\n location='node', scalar=xyz_cid0[:, 1], data_format='%g')\n z_res = GuiResult(\n 0, header='Z', title='Z',\n location='node', scalar=xyz_cid0[:, 2], data_format='%g')\n cases[icase] = (x_res, (0, 'X'))\n cases[icase + 1] = (y_res, (0, 'Y'))\n cases[icase + 2] = (z_res, (0, 'Z'))\n form_checks.append(('X', icase + 0, []))\n form_checks.append(('Y', icase + 1, []))\n form_checks.append(('Z', icase + 2, []))\n icase += 3\n\n elif is_solid:\n # only solid elements\n form_checks = []\n form0.append(('Element Checks', None, form_checks))\n\n min_edge_length_res = GuiResult(\n 0, header='Min Edge Length', title='Min Edge Length',\n location='centroid', scalar=min_edge_length)\n min_theta_res = GuiResult(\n 0, header='Min Interior Angle', title='Min Interior Angle',\n location='centroid', scalar=np.degrees(min_interior_angle))\n max_theta_res = GuiResult(\n 0, header='Max Interior Angle', title='Max Interior Angle',\n location='centroid', scalar=np.degrees(max_interior_angle))\n skew = 90. - np.degrees(max_skew_angle)\n #skew_res = GuiResult(0, header='Max Skew Angle', title='MaxSkewAngle',\n #location='centroid', scalar=skew)\n if is_element_dim:\n form_checks.append(('ElementDim', icase, []))\n form_checks.append(('Min Edge Length', icase + 1, []))\n form_checks.append(('Min Interior Angle', icase + 2, []))\n form_checks.append(('Max Interior Angle', icase + 3, []))\n form_checks.append(('Max Skew Angle', icase + 4, []))\n cases[icase + 1] = (min_edge_length_res, (0, 'Min Edge Length'))\n cases[icase + 2] = (min_theta_res, (0, 'Min Interior Angle'))\n cases[icase + 3] = (max_theta_res, (0, 'Max Interior Angle'))\n #cases[icase + 4] = (skew_res, (0, 'Max Skew Angle'))\n icase += 4\n\n else:\n form0.append(('ElementDim', icase, []))\n icase += 1\n\n if isgreater_int(mcid_array, -1):\n material_coord_res = GuiResult(\n 0, header='MaterialCoord', title='MaterialCoord',\n location='centroid',\n scalar=mcid_array, mask_value=-1, data_format='%i')\n cases[icase] = (material_coord_res, (0, 'MaterialCoord'))\n form0.append(('MaterialCoord', icase, []))\n icase += 1\n if isfinite(material_theta_array):\n material_theta_res = GuiResult(\n 0, header='MaterialTheta', title='MaterialTheta',\n location='centroid',\n scalar=material_theta_array, data_format='%.3f')\n cases[icase] = (material_theta_res, (0, 'MaterialTheta'))\n form0.append(('MaterialTheta', icase, []))\n icase += 1\n\n #print(normals)\n #----------------------------------------------------------\n # finishing up vtk\n if nelements and isfinite(min_edge_length):\n mean_edge_length = np.nanmean(min_edge_length)\n self.set_glyph_scale_factor(mean_edge_length * 2.5) # was 1.5\n\n grid.Modified()\n #----------------------------------------------------------\n # finishing up parameters\n self.node_ids = all_nids\n self.normals = normals\n\n return nid_to_pid_map, icase, cases, form", "def borehole_plane_intersection(self):\n\n # 1. 
Step: Compute direction vectors to each borehole ==========================================================\n borehole_data = self.borehole_geometry.copy()\n borehole_data[\"depth\"] = 0\n borehole_to_global_coords(\n data=borehole_data,\n x=\"x\",\n y=\"y\",\n z=\"z\",\n depth=\"depth\",\n upward_gradient=\"upward_gradient\",\n azimuth=\"azimuth\",\n )\n\n # Extract relevant columns from borehole data\n _mask = [\"borehole\", \"x_gts\", \"y_gts\", \"z_gts\", \"_trig_x\", \"_trig_y\", \"_trig_z\"]\n bh_data = borehole_data[_mask]\n\n mapper = {\n \"x_gts\": \"x_bh\",\n \"y_gts\": \"y_bh\",\n \"z_gts\": \"z_bh\",\n \"_trig_x\": \"r_x\",\n \"_trig_y\": \"r_y\",\n \"_trig_z\": \"r_z\",\n }\n bh_data = bh_data.rename(columns=mapper)\n\n # 2. Step: Calculate shear-zone unit normals and centroids =====================================================\n sz = self.planes()\n\n # 3. Step: Extract shear-zone borehole geometry ================================================================\n # i.e. only the shear-zones used for computing shear-zone planes.\n sz_bh = self.shearzone_borehole_geometry.copy()\n sz_bh = sz_bh[sz_bh.depth.notna()]\n sz_bh = sz_bh.rename(columns={\"depth\": \"old_depth\"})\n\n # 4. Step: Merge the collected data ============================================================================\n df = sz.merge(sz_bh, on=\"shearzone\").merge(bh_data, on=\"borehole\")\n\n # 5. Step: Calculate new shear-zone borehole intersections. ====================================================\n # Quantities\n n_vec = [\"n_x\", \"n_y\", \"n_z\"]\n r_vec = [\"r_x\", \"r_y\", \"r_z\"]\n bh_coords = [\"x_bh\", \"y_bh\", \"z_bh\"]\n sz_coords = [\"x_c\", \"y_c\", \"z_c\"]\n\n # Depth calculation\n df[\"depth\"] = (\n (df[sz_coords].values - df[bh_coords].values) * df[n_vec].values\n ).sum(axis=1) / (df[n_vec].values * df[r_vec].values).sum(axis=1)\n\n # Calculate global coordinates\n df.loc[:, \"x_sz\"] = df.x_bh + (df.depth * df.r_x)\n df.loc[:, \"y_sz\"] = df.y_bh + (df.depth * df.r_y)\n df.loc[:, \"z_sz\"] = df.z_bh + (df.depth * df.r_z)\n\n return df", "def get_segmented_point_clouds(seg_masks, depth): \n obj_labels = np.unique(seg_masks)\n num_objs = obj_labels.shape[0]+1\n rows, cols = seg_masks.shape\n cm = plt.get_cmap('gist_rainbow')\n colors = [cm(1. 
* i/num_objs) for i in range(num_objs)]\n \n object_dict = {}\n # key - object label; val - depth array of that object\n for i in obj_labels:\n object_dict[i] = np.zeros((rows,cols), dtype = np.float32)\n\n for i in range(rows):\n for j in range(cols):\n if seg_masks[i][j] != 0 and seg_masks[i][j] != -1:\n object_dict[seg_masks[i][j]][i][j] = depth[i][j]\n \n segmented_pcds = []\n for key, val in object_dict.items():\n if key == -1 or key == 0:\n continue\n img = o3d.geometry.Image(val)\n pcd_from_depth = o3d.geometry.PointCloud.create_from_depth_image(\n img,\n o3d.camera.PinholeCameraIntrinsic(\n o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault))\n\n # Multiply with Transformation matrix to get correct view of the PCD\n pcd_from_depth.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n pcd_from_depth.paint_uniform_color(np.array(colors[key][:3], dtype = np.uint8) * 255)\n segmented_pcds.append(pcd_from_depth)\n return segmented_pcds", "def regular_neighborhood(self):\n euler_char = self.num_switches() - self.num_branches()\n return Surface(num_punctures=self.num_complementary_regions(),\n euler_char=euler_char)", "def _get_single_direction_neighbors(object_idx, ui_v_dist, ui_h_dist):\n neighbor_dict = {}\n vertical_dist = ui_v_dist[object_idx]\n horizontal_dist = ui_h_dist[object_idx]\n bottom_neighbors = np.array([\n idx for idx in range(len(vertical_dist)) if vertical_dist[idx] > 0 and\n abs(horizontal_dist[idx]) < config.NORM_HORIZONTAL_NEIGHBOR_MARGIN\n ])\n top_neighbors = np.array([\n idx for idx in range(len(vertical_dist)) if vertical_dist[idx] < 0 and\n abs(horizontal_dist[idx]) < config.NORM_HORIZONTAL_NEIGHBOR_MARGIN\n ])\n right_neighbors = np.array([\n idx for idx in range(len(horizontal_dist)) if horizontal_dist[idx] > 0 and\n abs(vertical_dist[idx]) < config.NORM_VERTICAL_NEIGHBOR_MARGIN\n ])\n left_neighbors = np.array([\n idx for idx in range(len(horizontal_dist)) if horizontal_dist[idx] < 0 and\n abs(vertical_dist[idx]) < config.NORM_VERTICAL_NEIGHBOR_MARGIN\n ])\n\n if bottom_neighbors.size:\n neighbor_dict[NeighborContextDesc.TOP] = bottom_neighbors[np.argmin(\n vertical_dist[bottom_neighbors])]\n if top_neighbors.size:\n neighbor_dict[NeighborContextDesc.BOTTOM] = top_neighbors[np.argmax(\n vertical_dist[top_neighbors])]\n if right_neighbors.size:\n neighbor_dict[NeighborContextDesc.LEFT] = right_neighbors[np.argmin(\n horizontal_dist[right_neighbors])]\n if left_neighbors.size:\n neighbor_dict[NeighborContextDesc.RIGHT] = left_neighbors[np.argmax(\n horizontal_dist[left_neighbors])]\n\n return neighbor_dict", "def voronoi_sub_mask_1d_index_to_pixeliztion_1d_index_from_grids_and_geometry(\n grid,\n mask_1d_index_to_nearest_pixelization_1d_index,\n sub_mask_1d_index_to_mask_1d_index,\n pixel_centres,\n pixel_neighbors,\n pixel_neighbors_size,\n):\n\n sub_mask_1d_index_to_pixeliztion_1d_index = np.zeros((grid.shape[0]))\n\n for sub_mask_1d_index in range(grid.shape[0]):\n\n nearest_pixelization_1d_index = mask_1d_index_to_nearest_pixelization_1d_index[\n sub_mask_1d_index_to_mask_1d_index[sub_mask_1d_index]\n ]\n\n while True:\n\n nearest_pixelization_pixel_center = pixel_centres[\n nearest_pixelization_1d_index\n ]\n\n sub_pixel_to_nearest_pixelization_distance = (\n (grid[sub_mask_1d_index, 0] - nearest_pixelization_pixel_center[0]) ** 2\n + (grid[sub_mask_1d_index, 1] - nearest_pixelization_pixel_center[1])\n ** 2\n )\n\n closest_separation_from_pixelization_to_neighbor = 1.0e8\n\n for neighbor_pixelization_1d_index in range(\n 
pixel_neighbors_size[nearest_pixelization_1d_index]\n ):\n\n neighbor = pixel_neighbors[\n nearest_pixelization_1d_index, neighbor_pixelization_1d_index\n ]\n\n separation_from_neighbor = (\n grid[sub_mask_1d_index, 0] - pixel_centres[neighbor, 0]\n ) ** 2 + (grid[sub_mask_1d_index, 1] - pixel_centres[neighbor, 1]) ** 2\n\n if (\n separation_from_neighbor\n < closest_separation_from_pixelization_to_neighbor\n ):\n closest_separation_from_pixelization_to_neighbor = (\n separation_from_neighbor\n )\n closest_neighbor_pixelization_1d_index = (\n neighbor_pixelization_1d_index\n )\n\n neighboring_pixelization_1d_index = pixel_neighbors[\n nearest_pixelization_1d_index, closest_neighbor_pixelization_1d_index\n ]\n sub_pixel_to_neighboring_pixelization_distance = (\n closest_separation_from_pixelization_to_neighbor\n )\n\n if (\n sub_pixel_to_nearest_pixelization_distance\n <= sub_pixel_to_neighboring_pixelization_distance\n ):\n sub_mask_1d_index_to_pixeliztion_1d_index[\n sub_mask_1d_index\n ] = nearest_pixelization_1d_index\n break\n else:\n nearest_pixelization_1d_index = neighboring_pixelization_1d_index\n\n return sub_mask_1d_index_to_pixeliztion_1d_index", "def inpaint(self, img_slice, mask_slice, min_x, max_x, min_y, max_y, views='lateral'):\n # create binary mask\n mask = np.zeros(img_slice.shape)\n mask[min_x:max_x, min_y:max_y] = 1\n # keep a copy of original to have background later \n img_orig = np.copy(img_slice)\n mask_binary = np.copy(mask)\n\n # rotate image if coronal\n if views=='coronal':\n img_slice = np.rot90(img_slice, axes=(1, 0)) # image is from lat,ax -> ax,lat\n mask_slice = np.rot90(mask_slice, axes=(1, 0))\n mask = np.rot90(mask, axes=(1, 0))\n \n # prepare binary mask for net\n mask = cv2.resize(mask, self.resize_size, interpolation=cv2.INTER_NEAREST)\n mask = torch.Tensor(mask) # gives dtype float32\n mask = mask.unsqueeze(0)\n mask = mask.unsqueeze(0)\n\n # prepare seg mask for net\n mask_slice[mask_slice==self.vertebra_id] = 0\n # resize to network size\n mask_seg = cv2.resize(mask_slice, self.resize_size, interpolation=cv2.INTER_NEAREST)\n mask_seg = np.uint8(np.round(mask_seg)) # just to be sure\n\n mask_seg = self.map_vert_to_class(mask_seg)\n mask_seg = torch.Tensor(mask_seg) # gives dtype float32\n mask_seg_one_hot = torch.nn.functional.one_hot(mask_seg.long(), num_classes=6)\n mask_seg_one_hot = mask_seg_one_hot.permute(2,0,1)\n mask_seg_one_hot = mask_seg_one_hot.unsqueeze(0)\n mask_seg = mask_seg.unsqueeze(0)\n mask_seg = mask_seg.unsqueeze(0)\n\n # prepare img for net \n img_slice = cv2.resize(img_slice, self.resize_size)\n img_slice = np.clip(img_slice, -1024, 3071) # clip to HU units\n img_slice = np.uint8(255*(img_slice+1024)/4095) # normalize to range 0-255 \n img_slice = img_slice[:,:, None]\n img_slice = self.toTensor(img_slice)\n img_slice = img_slice.unsqueeze(0)\n corrupt_img = (1-mask)*img_slice\n\n if self.use_cuda:\n mask = mask.cuda()\n mask_seg = mask_seg.cuda()\n corrupt_img = corrupt_img.cuda() \n\n # inpaint\n if views=='lateral':\n netG = self.netGlat\n elif views=='coronal':\n netG = self.netGcor\n\n # get prediction\n with torch.no_grad():\n _, inpainted_mask, inpainted_img = netG(corrupt_img, mask_seg, mask)\n inpainted_mask = self.softmax(inpainted_mask)\n\n #inpainted_mask = torch.argmax(inpainted_mask, dim=1)\n inpainted_img = inpainted_img * mask + corrupt_img * (1. - mask)\n inpainted_mask = inpainted_mask * mask + mask_seg_one_hot * (1. 
- mask)\n #inpainted_mask = self.map_class_to_vert(inpainted_mask)\n\n # set img back to how it was\n inpainted_img = inpainted_img.squeeze().detach().cpu().numpy()\n inpainted_img = (inpainted_img)*4095 - 1024 # normalize back to HU units \n inpainted_img = cv2.resize(inpainted_img, (self.orig_ax_length, self.orig_ax_length))\n # set mask back\n inpainted_mask = inpainted_mask.squeeze().detach().cpu().numpy()\n inpainted_mask_resized = np.zeros((6, self.orig_ax_length, self.orig_ax_length))\n for i in range(6):\n if views=='coronal':\n inpainted_mask_resized[i,:,:] = np.rot90(cv2.resize(inpainted_mask[i,:,:], (self.orig_ax_length, self.orig_ax_length))) #, interpolation=cv2.INTER_NEAREST)\n else:\n inpainted_mask_resized[i,:,:] = cv2.resize(inpainted_mask[i,:,:], (self.orig_ax_length, self.orig_ax_length)) #, interpolation=cv2.INTER_NEAREST)\n inpainted_mask = inpainted_mask_resized\n \n if views=='coronal':\n inpainted_img = np.rot90(inpainted_img) #, axes=(1, 0))\n\n return inpainted_img, inpainted_mask, mask_binary", "def wireframe(self, projection_type, canvas_dimensions):\n # Configure viewportself.screen_dimensions = {\n self.screen_dimensions = {\n \"width\": canvas_dimensions['width'],\n \"height\": canvas_dimensions['height']\n }\n\n self.projection.viewport = self.screen_dimensions\n self.projection.projection_type = projection_type\n self.projection.camera = self.cameras[0]\n self.projection.region_width = self.screen_dimensions.get('width')\n self.projection.region_height = self.screen_dimensions.get('height')\n\n # Draw polygons for each object\n projected_objects = []\n for obj in self.objects:\n print('Rendering: ', obj)\n\n world_transformation = obj.translate(\n obj.rotate(obj.scale(obj.vertices))\n )\n camera_transformation = obj.rotate(\n obj.translate(world_transformation, np.array(\n [\n -self.projection.camera.translation[0],\n -self.projection.camera.translation[1],\n -self.projection.camera.translation[2]\n ]\n )), np.array(\n [\n -self.projection.camera.rotation[0],\n -self.projection.camera.rotation[1],\n -self.projection.camera.rotation[2]\n ]\n \n )\n )\n projected_view = self.projection.project_all(camera_transformation)\n normalized_view = obj.normalize(\n projected_view, self.projection.viewport\n )\n projected_faces = []\n for face in obj.faces:\n poly = []\n for vertex_index in face:\n poly.append(\n [\n int(normalized_view[vertex_index][0]),\n int(normalized_view[vertex_index][1]),\n int(camera_transformation[vertex_index][2])\n ]\n )\n projected_faces.append(poly)\n center = list(obj.calculate_center(normalized_view))\n vertices = [ [int(p[0]), int(p[1]), int(p[2])] for p in normalized_view]\n # print('calculated_center: ', center)\n # print(''vertices)\n projected_objects.append({\n 'vertices': vertices,\n 'faces': obj.clip(self.projection.camera.translation, projected_faces),\n 'center': [ int(coord) for coord in obj.calculate_center(normalized_view) ],\n })\n print(projected_objects[0]['faces'][:20])\n return projected_objects", "def chooseObject(self, x, y, z):\r\n\r\n \"\"\"deletes the editPoints list. 
This must be reset every time a new object\r\n is clicked on so that the prevous editpoints from another segmentation do not\r\n interfere with the current object's segmentation\"\"\"\r\n \r\n del self.editPoints[:]\r\n \r\n self.dispedge = to_rgb(self.img[self.z_stack])\r\n \r\n #print \"SENDER LABEL: \" + str(sender.text()\r\n self.center= np.array((z*self.zinterp, x, y))\r\n\r\n xpix=self.img.shape[2]-1\r\n ypix= self.img.shape[1]-1\r\n zpix= self.img.shape[0]*self.zinterp-1\r\n \r\n\r\n #currently padding all sides by 50 for now.\r\n\r\n self.radius= self.curRadius\r\n self.count+=1\r\n \r\n self.padList= np.array([xpix-x, ypix-y, zpix-z*self.zinterp, x, y,z*self.zinterp])\r\n self.padList= self.radius-self.padList\r\n self.padList=(self.padList>0)*self.padList\r\n\r\n \"\"\"perform graphcut and display on interface\"\"\"\r\n self.temp, self.edge= graphCut(interp_img(self.img, self.zinterp), self.center, self.radius,self.temp, self.edge, self.count, self.editPoints, self.padList, self.theta_div, self.phi_div)\r\n self.shrink= self.edge[0:interp_img(self.img, self.zinterp).shape[0]:self.zinterp]!=0\r\n \r\n \"\"\"display the object on the xy plane\"\"\"\r\n self.pixmap2=self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n \r\n \"\"\"display the object on the xz plane\"\"\"\r\n self.pixmap4=self.writeEdge(\"xz\")\r\n self.lbl4.setPixmap(self.pixmap4)\r\n \r\n \"\"\"display the object on the yz plane\"\"\"\r\n self.pixmap6=self.writeEdge(\"yz\")\r\n self.lbl6.setPixmap(self.pixmap6)", "def get_boundary_layers(cell_cent, el, num_lyrs, bc_loc, struct_grd):\n dim = len(el)\n bound_range = np.zeros(2*dim, dtype=float)\n bound_nodes = {} #dict to store the node numbers of centroids that lie within bound_range\n if(struct_grd):\n fctr = 1\n corr = 0\n lyrs = float(num_lyrs-1)+ 0.0001\n else:\n fctr = 2\n corr = 1\n lyrs = float(num_lyrs)+ 0.0001\n\n lyrs = 1.0001*float(num_lyrs-1)\n for d in range(dim):\n bound_range[2*d] = np.min(cell_cent[:,d]) + corr*np.diff(np.unique(cell_cent[:,d])[0:2])[0] + lyrs*el[d]\n bound_range[2*d+1] = np.max(cell_cent[:,d]) - corr*np.diff(np.unique(cell_cent[:,d])[0:2])[0] - lyrs*el[d]\n\n bound_nodes[2*d] = np.where(cell_cent[:,d] <= bound_range[2*d])\n bound_nodes[(2*d+1)] = np.where(cell_cent[:,d] >= bound_range[2*d+1])\n\n #store only those key value pair that are in the bc_loc\n #this in the end returns mesh with ghost layer cells, \n #if they've been applied already\n keys = bound_nodes.keys()\n keys_temp = [kk for kk in keys]\n for kk in keys_temp:\n if kk not in bc_loc:\n bound_nodes.pop(kk, None)\n \n return bound_nodes", "def cell_merge(wsh, pred):\n wshshape=wsh.shape\n \n # masks for the original cells\n objs = np.zeros((wsh.max()+1,wshshape[0],wshshape[1]), dtype=bool)\t\n \n # masks for dilated cells\n dil_objs = np.zeros((wsh.max()+1,wshshape[0],wshshape[1]), dtype=bool)\n \n # bounding box coordinates\t\n obj_coords = np.zeros((wsh.max()+1,4))\n \n # cleaned watershed, output of function\t\n wshclean = np.zeros((wshshape[0],wshshape[1]))\n \n # kernel to dilate objects\n kernel = np.ones((3,3), dtype=bool)\t\n \n for obj1 in range(wsh.max()):\n # create masks and dilated masks for obj\n objs[obj1,:,:] = wsh==(obj1+1)\t\n dil_objs[obj1,:,:] = dilation(objs[obj1,:,:], kernel)\t\n \n # bounding box\n obj_coords[obj1,:] = get_bounding_box(dil_objs[obj1,:,:])\n \n objcounter = 0\t# counter for new watershed objects\n \n for obj1 in range(wsh.max()):\t\n dil1 = dil_objs[obj1,:,:]\n\n # check if mask has been deleted\n if np.sum(dil1) == 0:\n 
continue\n \n objcounter = objcounter + 1\n orig1 = objs[obj1,:,:]\n\n for obj2 in range(obj1+1,wsh.max()):\n dil2 = dil_objs[obj2,:,:]\n \n # only check border if bounding box overlaps, and second mask \n # is not yet deleted\n if (do_box_overlap(obj_coords[obj1,:], obj_coords[obj2,:])\n and np.sum(dil2) > 0):\n \n border = dil1 * dil2\t\n border_pred = pred[border]\n \n # Border is too small to be considered\n if len(border_pred) < 32:\n continue\n \n # Sum of top 25% of predicted border values\n q75 = np.quantile(border_pred, .75)\n top_border_pred = border_pred[border_pred >= q75]\n top_border_height = top_border_pred.sum()\n top_border_area = len(top_border_pred)\n \n # merge cells\n if top_border_height / top_border_area > .99:\n orig1 = np.logical_or(orig1, objs[obj2,:,:])\n dil_objs[obj1,:,:] = np.logical_or(dil1, dil2)\n dil_objs[obj2,:,:] = np.zeros((wshshape[0], wshshape[1]))\n obj_coords[obj1,:] = get_bounding_box(dil_objs[obj1,:,:])\n \n wshclean = wshclean + orig1*objcounter\n \n return wshclean", "def forward_problem(m):\n hm = om.HeadMat(m['geometry'])\n hm.invert() # invert in place (no copy)\n dsm = om.DipSourceMat(m['geometry'], m['dipsources'])\n return hm * dsm", "def match_obj_hole(post_grasp_pos_patch,\n pre_grasp_pos_patch,\n post_x=None, post_y=None):\n if post_x is None:\n post_x, post_y, _ = find_single_blob_center(post_grasp_pos_patch)\n obj_center_x = int(pre_grasp_pos_patch.shape[1] / 2)\n obj_center_y = int(pre_grasp_pos_patch.shape[0] / 2)\n\n pre_grasp_pos_patch.dtype = np.int8\n post_grasp_pos_patch.dtype = np.int8\n opt_rot, opt_row_trans, opt_col_trans = 0., 0, 0\n\n old_neg_count = 2048\n neg_count = old_neg_count - 2\n\n is_erosion = False\n erosion_count = 0\n obj_patch = pre_grasp_pos_patch.copy()\n\n # Okay so the matching is actually iterative binary search\n while old_neg_count > neg_count:\n # print(old_neg_count, neg_count)\n\n if neg_count < 1:\n # print('ERODE')\n is_erosion = True\n erosion_count += 1\n kernel = np.ones([3, 3])\n post_grasp_pos_patch.dtype = np.uint8\n post_grasp_pos_patch = cv2.erode(post_grasp_pos_patch,\n kernel,\n iterations=1)\n post_grasp_pos_patch.dtype = np.int8\n neg_count = 2048\n\n old_neg_count = neg_count\n row_trans, col_trans, neg_count_1 = get_opt_translate(obj_img=obj_patch,\n back_img=post_grasp_pos_patch,\n back_center_x=post_x,\n back_center_y=post_y,\n obj_center_x=obj_center_x,\n obj_center_y=obj_center_y,\n prev_row_trans=opt_row_trans,\n prev_col_trans=opt_col_trans,\n is_erosion=is_erosion)\n if neg_count_1 < old_neg_count:\n opt_row_trans = row_trans\n opt_col_trans = col_trans\n rot_res, neg_count_2 = get_opt_rotate(obj_img=pre_grasp_pos_patch,\n back_img=post_grasp_pos_patch,\n back_center_x=post_x + opt_col_trans,\n back_center_y=post_y + opt_row_trans,\n obj_center_x=obj_center_x,\n obj_center_y=obj_center_y,\n prev_rot_angle=opt_rot,\n is_erosion=is_erosion)\n if neg_count_2 < neg_count_1:\n opt_rot = rot_res\n neg_count = min(neg_count_1, neg_count_2)\n obj_patch = ndimage.rotate(pre_grasp_pos_patch, opt_rot, reshape=False)\n return is_erosion, erosion_count, int(opt_row_trans), int(opt_col_trans), opt_rot, post_x, post_y", "def __init__(self, n_pixels_u, n_pixels_v, detector_size_u, detector_size_v, source_to_detector_dist,\n source_to_object_dist, angular_inc=1, center_of_rot=0, **kwargs):\n\n self.n_pixels_u = n_pixels_u\n self.n_pixels_v = n_pixels_v\n\n self.detector_size_u = detector_size_u\n self.detector_size_v = detector_size_v\n self.source_to_detector_dist = source_to_detector_dist\n 
self.source_to_object_dist = source_to_object_dist\n self.angular_inc = angular_inc\n\n self.center_of_rot_u = center_of_rot\n\n # All values below are calculated\n\n self.projection_angs = np.arange(0., 360, self.angular_inc)\n self.n_projections = len(self.projection_angs)\n\n self.object_size_x = self.detector_size_u * self.source_to_object_dist / self.source_to_detector_dist\n self.object_size_y = self.detector_size_u * self.source_to_object_dist / self.source_to_detector_dist\n self.object_size_z = self.detector_size_v * self.source_to_object_dist / self.source_to_detector_dist\n\n self.voxel_size_x = self.object_size_x / self.n_pixels_u\n self.voxel_size_y = self.object_size_y / self.n_pixels_u\n self.voxel_size_z = self.object_size_z / self.n_pixels_v\n\n self.pixel_size_u = self.detector_size_u / self.n_pixels_u\n self.pixel_size_v = self.detector_size_v / self.n_pixels_v\n\n self.center_of_rot_y = self.center_of_rot_u * (\n self.source_to_object_dist / self.source_to_detector_dist) * self.pixel_size_u\n\n self.object_ys = (np.arange(self.n_pixels_u, dtype=np.float64) - self.n_pixels_u / 2.) * self.voxel_size_y\n self.object_xs = (np.arange(self.n_pixels_u, dtype=np.float64) - self.n_pixels_u / 2.) * self.voxel_size_x\n self.object_zs = (np.arange(self.n_pixels_v, dtype=np.float64) - self.n_pixels_v / 2.) * self.voxel_size_z\n\n self.detector_us = (np.arange(self.n_pixels_u,\n dtype=np.float64) - self.n_pixels_u / 2.) * self.pixel_size_u\n self.detector_vs = (np.arange(self.n_pixels_v,\n dtype=np.float64) - self.n_pixels_v / 2.) * self.pixel_size_v", "def populateCenters(matrix, row, col, frame, midRange, roughness, perturbance):\n maxIndex = matrix.shape[0]-1\n quarterRange = midRange/2\n\n pf = perturbanceFactor(matrix.shape[0], midRange, perturbance)\n noiseLevel = roughness * pf\n\n \"\"\"\n For each subdivided cube, getIndexRef is used to get the indicies, and center is used\n to determine the points that should be averaged and the point to be set. 
\n setValue does the calculations.\n \"\"\"\n indexRef = getIndexRef(row, col, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col + midRange, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col + midRange, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col + midRange, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col + midRange, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n\n #printAllowCancel(matrix)", "def cfdProcessGeometry(self):\r\n \r\n # self.faceCentroids']= [[] for i in range(self.numberOfFaces'])]\r\n # self.faceSf']= [[] for i in range(self.numberOfFaces'])]\r\n # self.faceAreas']= [[] for i in range(self.numberOfFaces'])]\r\n \r\n ## Linear weight of distance from cell center to face\r\n self.faceWeights= [[0] for i in range(self.numberOfFaces)]\r\n\r\n ## Not\r\n self.faceCF= [[0, 0, 0] for i in range(self.numberOfFaces)]\r\n \r\n self.faceCf= [[0,0,0] for i in range(self.numberOfFaces)]\r\n \r\n self.faceFf= [[0,0,0] for i in range(self.numberOfFaces)]\r\n \r\n self.wallDist= [[] for i in range(self.numberOfFaces)]\r\n \r\n self.wallDistLimited= [[] for i in range(self.numberOfFaces)]\r\n \r\n self.elementCentroids= [[] for i in range(self.numberOfElements)]\r\n self.elementVolumes= [[] for i in range(self.numberOfElements)]\r\n \r\n \"\"\"\r\n Calculate:\r\n -face centroids (faceCentroids)\r\n -face normal (Sf)\r\n -face areas (faceAreas)\r\n \"\"\"\r\n \r\n #find cell with largest number of points\r\n maxPoints=len(max(self.faceNodes, key=len))\r\n forCross1 = [[] for i in range(maxPoints)]\r\n forCross2 = [[] for i in range(maxPoints)]\r\n local_faceCentroid=[[] for i in range(maxPoints)]\r\n \r\n for iFace in range(self.numberOfFaces):\r\n theNodeIndices = self.faceNodes[iFace]\r\n theNumberOfFaceNodes = len(theNodeIndices)\r\n \r\n #compute a rough centre of the face\r\n local_centre = [0,0,0]\r\n \r\n for iNode in theNodeIndices:\r\n local_centre = local_centre + self.nodeCentroids[int(iNode)]\r\n \r\n local_centre = local_centre/theNumberOfFaceNodes\r\n \r\n for iTriangle in range(theNumberOfFaceNodes):\r\n \r\n point1 = local_centre\r\n point2 = self.nodeCentroids[int(theNodeIndices[iTriangle])]\r\n \r\n if iTriangle < theNumberOfFaceNodes-1:\r\n point3 = self.nodeCentroids[int(theNodeIndices[iTriangle+1])]\r\n else:\r\n point3 = self.nodeCentroids[int(theNodeIndices[0])]\r\n \r\n local_faceCentroid[iTriangle].append((point1+point2+point3)/3)\r\n \r\n left=point2-point1\r\n right=point3-point1\r\n \r\n forCross1[iTriangle].append(left)\r\n forCross2[iTriangle].append(right)\r\n \r\n \r\n local_Sf=[np.zeros([self.numberOfFaces,3]) for i in range(maxPoints)]\r\n local_area=[np.zeros([self.numberOfFaces,3]) for i in range(maxPoints)]\r\n \r\n centroid=np.zeros([self.numberOfFaces,3])\r\n area=np.zeros([self.numberOfFaces])\r\n 
Sf=np.zeros([self.numberOfFaces,3])\r\n \r\n #cells with fewer faces than others are full of zeros\r\n for i in range(maxPoints):\r\n \r\n forCrossLeft=np.vstack(np.array(forCross1[i]))\r\n forCrossRight=np.vstack(np.array(forCross2[i]))\r\n \r\n local_Sf[i]=0.5*np.cross(forCrossLeft,forCrossRight)\r\n local_area[i]=np.linalg.norm(local_Sf[i],axis=1)\r\n \r\n centroid = centroid + np.array(local_faceCentroid[i])*local_area[i][:,None]\r\n Sf=Sf+local_Sf[i]\r\n area=area+local_area[i]\r\n \r\n self.faceCentroids=centroid/area[:,None]\r\n self.faceSf=Sf\r\n self.faceAreas=area \r\n \r\n \r\n \"\"\"\r\n Pure python version - causes slowness due to iterative np.cross()\r\n \"\"\"\r\n \r\n # for iFace in range(self.numberOfFaces):\r\n # theNodeIndices = self.faceNodes[iFace]\r\n # theNumberOfFaceNodes = len(theNodeIndices)\r\n # \r\n # #compute a rough centre of the face\r\n # local_centre = [0,0,0]\r\n # \r\n # for iNode in theNodeIndices:\r\n # \r\n # local_centre = local_centre + self.nodeCentroids[int(iNode)]\r\n # \r\n # local_centre = local_centre/theNumberOfFaceNodes\r\n # centroid = [0, 0, 0]\r\n # Sf = [0,0,0]\r\n # area = 0\r\n # \r\n # #finds area of virtual triangles and adds them to the find to find face area\r\n # #and direction (Sf)\r\n # \r\n # \r\n # \r\n # for iTriangle in range(theNumberOfFaceNodes):\r\n # point1 = local_centre\r\n # point2 = self.nodeCentroids[int(theNodeIndices[iTriangle])]\r\n # \r\n # if iTriangle < theNumberOfFaceNodes-1:\r\n # point3 = self.nodeCentroids[int(theNodeIndices[iTriangle+1])]\r\n # else:\r\n # point3 = self.nodeCentroids[int(theNodeIndices[0])]\r\n # \r\n # local_centroid = (point1 + point2 + point3)/3\r\n # \r\n # left=point2-point1\r\n # right=point3-point1\r\n # x = 0.5*((left[1] * right[2]) - (left[2] * right[1]))\r\n # y = 0.5*((left[2] * right[0]) - (left[0] * right[2]))\r\n # z = 0.5*((left[0] * right[1]) - (left[1] * right[0]))\r\n # local_Sf=np.array([x,y,z])\r\n # \r\n # local_area = np.linalg.norm(local_Sf)\r\n # \r\n # centroid = centroid + local_area*local_centroid\r\n # Sf = Sf + local_Sf\r\n # area = area + local_area\r\n # centroid = centroid/area\r\n # self.faceCentroids[iFace]=centroid\r\n # self.faceSf[iFace]=Sf\r\n # self.faceAreas[iFace]=area\r\n \r\n \r\n \"\"\"\r\n Calculate:\r\n -element centroids (elementCentroids)\r\n -element volumes (elementVolumes)\r\n \"\"\"\r\n for iElement in range(self.numberOfElements):\r\n \r\n theElementFaces = self.elementFaces[iElement]\r\n \r\n #compute a rough centre of the element\r\n local_centre = [0,0,0]\r\n \r\n for iFace in range(len(theElementFaces)):\r\n faceIndex = theElementFaces[iFace]\r\n local_centre = local_centre + self.faceCentroids[faceIndex]\r\n \r\n local_centre = local_centre/len(theElementFaces)\r\n \r\n localVolumeCentroidSum = [0,0,0]\r\n localVolumeSum = 0\r\n \r\n for iFace in range(len(theElementFaces)):\r\n faceIndex = theElementFaces[iFace]\r\n \r\n Cf = self.faceCentroids[faceIndex]-local_centre\r\n \r\n faceSign = -1\r\n if iElement == self.owners[faceIndex]:\r\n faceSign = 1\r\n \r\n local_Sf = faceSign*self.faceSf[faceIndex]\r\n \r\n localVolume = np.dot(local_Sf,Cf)/3\r\n \r\n localCentroid = 0.75*self.faceCentroids[faceIndex]+0.25*local_centre\r\n \r\n localVolumeCentroidSum = localVolumeCentroidSum + localCentroid*localVolume\r\n \r\n localVolumeSum = localVolumeSum + localVolume\r\n \r\n self.elementCentroids[iElement]=localVolumeCentroidSum/localVolumeSum\r\n self.elementVolumes[iElement]=localVolumeSum\r\n \r\n \r\n for iFace in 
range(self.numberOfInteriorFaces):\r\n \r\n n=self.faceSf[iFace]/np.linalg.norm(self.faceSf[iFace])\r\n own=self.owners[iFace]\r\n nei = self.neighbours[iFace]\r\n \r\n self.faceCF[iFace]=self.elementCentroids[nei]-self.elementCentroids[own]\r\n self.faceCf[iFace]=self.faceCentroids[iFace]-self.elementCentroids[own]\r\n self.faceFf[iFace]=self.faceCentroids[iFace]-self.elementCentroids[nei]\r\n self.faceWeights[iFace]=(-np.dot(self.faceFf[iFace],n))/(-np.dot(self.faceFf[iFace],n)+np.dot(self.faceCf[iFace],n))\r\n \r\n for iBFace in range(self.numberOfInteriorFaces, self.numberOfFaces):\r\n \r\n \r\n n=self.faceSf[iBFace]/np.linalg.norm(self.faceSf[iBFace])\r\n own=self.owners[iBFace]\r\n \r\n self.faceCF[iBFace]=self.faceCentroids[iBFace]-self.elementCentroids[own]\r\n self.faceCf[iBFace]=self.faceCentroids[iBFace]-self.elementCentroids[own] \r\n self.faceWeights[iBFace]=1\r\n self.wallDist[iBFace]= max(np.dot(self.faceCf[iBFace], n), 1e-24)\r\n self.wallDistLimited[iBFace]= max(self.wallDist[iBFace], 0.05*np.linalg.norm(self.faceCf[iBFace]))", "def detect_object(world):\n # create the map with only the obstucale to non-zero\n world_hsv = cv2.cvtColor(world, cv2.COLOR_BGR2HSV)\n mask_red = cv2.inRange(world_hsv, low_red, up_red)\n occupancy_grid = np.array(mask_red)\n world_rows, world_cols, _ = world.shape\n\n # create the mask in order to find the goal\n world_hsv = cv2.cvtColor(world, cv2.COLOR_BGR2HSV)\n mask_goal = cv2.inRange(world_hsv, low_blue, up_blue)\n goal_x, goal_y = (15, 15) # goal by default\n\n # look for the obstacle and increase there size\n for i in range(world_rows):\n for j in range(world_cols):\n occupancy_grid[i][j] = int(occupancy_grid[i][j] / 255)\n if mask_goal[i][j] > 200:\n goal_x, goal_y = (i, j)\n object_grid = [[goal_x, goal_y]]\n return object_grid, occupancy_grid", "def crossing_minimization(self):\n self.layer_sweep()", "def auto_rivet():\n sel_list = pm.ls(sl=1)\n\n # the last selection is the mesh\n objects = sel_list[:-1]\n geo = sel_list[-1]\n\n # get the closest point to the surface\n geo_shape = geo.getShape()\n\n follicles = []\n\n for obj in objects:\n # pivot point of the obj\n pivot = obj.getRotatePivot(space='world')\n uv = geo_shape.getUVAtPoint(pivot, space='world')\n\n # create a hair follicle\n follicle = pm.nt.Follicle()\n follicles.append(follicle)\n follicle.simulationMethod.set(0)\n geo_shape.worldMatrix >> follicle.inputWorldMatrix\n geo_shape.outMesh >> follicle.inputMesh\n follicle.parameterU.set(uv[0])\n follicle.parameterV.set(uv[1])\n\n # parent the object to the follicles transform node\n follicle_transform = follicle.getParent()\n\n follicle.outTranslate >> follicle_transform.translate\n follicle.outRotate >> follicle_transform.rotate\n\n pm.parent(obj, follicle_transform)\n\n return follicles", "def makeindmapKDE(self,indmap,s, background):\n import ipyml\n from ipyml.probability import pfunc\n sp = background.shape\n res = np.zeros((sp[0], sp[1]),dtype=np.float32)\n wr,wc = indmap.shape[0], indmap.shape[1]\n filter_size = 30\n stride = 12\n cov = np.asarray([[(2.0/filter_size)**2,0],[0,(2.0/filter_size)**2]])\n if 'g' in self.temp_data:\n g = self.temp_data['g']\n else:\n g = pfunc.Gaussian2D((sp[0],sp[1]),cov=cov,invcov=False)\n self.temp_data['g'] = g\n center_r = sp[0]\n center_c = sp[1]\n g = g/g.max()\n for r in range(wr):\n for c in range(wc):\n # calcuate the center of detection window\n rr = (r * stride + r * stride + filter_size-1)/2\n cc = (c * stride + c * stride + filter_size-1)/2\n offset_r = center_r - rr\n 
offset_c = center_c - cc\n res = res + g[offset_r:offset_r+sp[0],offset_c:offset_c+sp[1]] * indmap[r,c]\n idx = np.argmax(res)\n res = np.tile(res.reshape((res.shape[0],res.shape[1],1)),[1,1,3])\n mr = idx / sp[1]\n mc = idx - mr * sp[1]\n hf = filter_size/2\n box = np.asarray([mc -hf,mr -hf,mc + hf, mr + hf])\n return res/3, box", "def _compute_topology(self, initial_surfaces):\n\n nsurf = len(initial_surfaces)\n surfaces = numpy.zeros((nsurf, 3, 3, 3), float, 'F')\n\n for isurf in xrange(nsurf):\n surface = initial_surfaces[isurf]\n num_u, num_v = surface.shape[:2]\n mid_u1 = int(numpy.floor((num_u - 1) / 2.0))\n mid_u2 = int(numpy.ceil((num_u - 1) / 2.0))\n mid_v1 = int(numpy.floor((num_v - 1) / 2.0))\n mid_v2 = int(numpy.ceil((num_v - 1) / 2.0))\n\n for ind_u in xrange(2):\n for ind_v in xrange(2):\n surfaces[isurf, -ind_u, -ind_v] = surface[-ind_u, -ind_v]\n\n for ind_u in xrange(2):\n surfaces[isurf, -ind_u, 1] += 0.5 * surface[-ind_u, mid_v1] + \\\n 0.5 * surface[-ind_u, mid_v2]\n\n for ind_v in xrange(2):\n surfaces[isurf, 1, -ind_v] += 0.5 * surface[mid_u1, -ind_v] + \\\n 0.5 * surface[mid_u2, -ind_v]\n\n nvert, nedge, surf_ptrs \\\n = BSElib.computesurfconnectivities(nsurf, 1e-16, 1e-10, surfaces)\n\n edge_ptrs \\\n = BSElib.computeedgeconnectivities(nsurf, nedge, surf_ptrs)\n\n ngroup, surf_group, edge_group \\\n = BSElib.computegroups(nsurf, nedge, surf_ptrs)\n\n topology = [nvert, nedge, ngroup, \\\n surf_ptrs, edge_ptrs, \\\n surf_group, edge_group, \\\n ]\n\n return topology", "def remesh_blocks():\n \n # Get the active object\n obj = bpy.context.active_object\n \n nameCopy = \"temp_copy\"\n\n # Switch in object mode \n bpy.ops.object.mode_set(mode='OBJECT')\n\n # Remove all modifiers from the object\n obj.modifiers.clear()\n\n # Delete the existing copy \n for o in bpy.data.objects:\n if o.type == 'MESH' and o.name == nameCopy:\n # Delete the existing copy\n object_to_delete = bpy.data.objects[nameCopy]\n bpy.data.objects.remove(object_to_delete, do_unlink=True) \n \n \n # Make a copy of the object\n new_obj = obj.copy()\n new_obj.data = obj.data.copy()\n new_obj.animation_data_clear()\n bpy.context.collection.objects.link(new_obj)\n\n # Rename the copy\n new_obj.name = nameCopy\n\n # Hide the copy\n new_obj.hide_viewport = True\n\n # Remesh the faces of the object with blocks\n bpy.ops.object.modifier_add(type='REMESH')\n bpy.context.object.modifiers[\"Remesh\"].mode = 'BLOCKS'\n bpy.context.object.modifiers[\"Remesh\"].octree_depth = bpy.context.scene.level_blocks\n bpy.context.object.modifiers[\"Remesh\"].scale = 0.99\n bpy.context.object.modifiers[\"Remesh\"].use_remove_disconnected = False\n bpy.context.object.modifiers[\"Remesh\"].threshold = 1\n bpy.context.object.modifiers[\"Remesh\"].use_smooth_shade = False\n\n # Make intersection between the remesh object and the original\n bpy.ops.object.modifier_add(type='BOOLEAN')\n bpy.context.object.modifiers[\"Boolean\"].operation = 'INTERSECT'\n bpy.context.object.modifiers[\"Boolean\"].operand_type = 'OBJECT'\n bpy.context.object.modifiers[\"Boolean\"].object = bpy.data.objects[nameCopy]\n bpy.context.object.modifiers[\"Boolean\"].solver = 'FAST'\n bpy.context.object.modifiers[\"Boolean\"].double_threshold = 0", "def _compute_object_center_loss(self, input_height, input_width,\n object_center_predictions, per_pixel_weights,\n maximum_normalized_coordinate=1.1):\n gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)\n gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)\n\n if 
self._center_params.use_labeled_classes:\n gt_labeled_classes_list = self.groundtruth_lists(\n fields.InputDataFields.groundtruth_labeled_classes)\n batch_labeled_classes = tf.stack(gt_labeled_classes_list, axis=0)\n batch_labeled_classes_shape = tf.shape(batch_labeled_classes)\n batch_labeled_classes = tf.reshape(\n batch_labeled_classes,\n [batch_labeled_classes_shape[0], 1, batch_labeled_classes_shape[-1]])\n per_pixel_weights = per_pixel_weights * batch_labeled_classes\n\n # Convert the groundtruth to targets.\n assigner = self._target_assigner_dict[OBJECT_CENTER]\n if self._center_from_keypoints:\n gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)\n heatmap_targets = assigner.assign_center_targets_from_keypoints(\n height=input_height,\n width=input_width,\n gt_classes_list=gt_classes_list,\n gt_keypoints_list=gt_keypoints_list,\n gt_weights_list=gt_weights_list)\n else:\n gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)\n heatmap_targets = assigner.assign_center_targets_from_boxes(\n height=input_height,\n width=input_width,\n gt_boxes_list=gt_boxes_list,\n gt_classes_list=gt_classes_list,\n gt_weights_list=gt_weights_list,\n maximum_normalized_coordinate=maximum_normalized_coordinate)\n\n flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets)\n num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))\n\n loss = 0.0\n object_center_loss = self._center_params.classification_loss\n # Loop through each feature output head.\n for pred in object_center_predictions:\n pred = _flatten_spatial_dimensions(pred)\n loss += object_center_loss(\n pred, flattened_heatmap_targets, weights=per_pixel_weights)\n loss_per_instance = tf.reduce_sum(loss) / (\n float(len(object_center_predictions)) * num_boxes)\n return loss_per_instance" ]
[ "0.7613577", "0.6724186", "0.5619329", "0.5326774", "0.52730316", "0.527127", "0.52315", "0.5224766", "0.51455975", "0.5141383", "0.5130152", "0.5116327", "0.5095988", "0.50457686", "0.5040918", "0.5028932", "0.50051355", "0.4972177", "0.49689552", "0.49586454", "0.49513674", "0.4950115", "0.49307323", "0.49243113", "0.49237522", "0.492302", "0.49146324", "0.4898267", "0.48967806", "0.489182" ]
0.8169945
0
Get the critic associated with this actor.
def critic(self) -> CriticType:
    if not self._critic:
        raise ValueError("no critic associated with this actor")
    return self._critic
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_critics(self):\n actors = [ddpg_agent.critic for ddpg_agent in self.maddpg_agent]\n return actors", "def _critic(self, image):\n return torch.mean(self.critic(image))", "def critic(self, critic: CriticType) -> None:\n self._critic = critic", "def ability(self, ability):\n\n return self.race.abilities.get(ability)", "def conductor(self):\n return self._S.level()", "def _construct_critic(self):\n z = Input(shape=(self.latent_dim,))\n fc_6 = bn_dense(z, 64, activation=None)\n lk_act_1 = LeakyReLU(0.2)(fc_6)\n fc_7 = bn_dense(lk_act_1, 32, activation=None)\n lk_act_2 = LeakyReLU(0.2)(fc_7)\n fc_8 = bn_dense(lk_act_2, 32, activation=None)\n lk_act_3 = LeakyReLU(0.2)(fc_8)\n real_prob = bn_dense(lk_act_3, 1, activation='sigmoid')\n\n critic = Model(z, real_prob)\n critic.compile(optimizer=self.critic_opt(lr=self.critic_learning_rate),\n loss='binary_crossentropy')\n return critic", "def get_course(self):\n bib = self.get_bib()\n obj = race()\n course = find(obj.courses, name=str(bib))\n if course:\n return course\n\n # get course via group\n person = self.get_person()\n if person and isinstance(person, Person):\n if person.group:\n return person.group.course\n\n return None", "def get_criterion_score(self, crit_node):\n return self.get_node('Score', parent=crit_node)", "def crit_ai(crit):\n if crit['type'] == 'crawler':\n # Crawlers move at random.\n return random.choice(['left','right','up','down'])\n #if crit['type'] == 'bullet':\n # return crit['dir']\n return None", "def W_Crit(self, multiplier=1):\n multiplier = str(multiplier);\n weapon_dice_count = self.Attribute_Power(\"weapon-num-dice\");\n weapon_dice = self.Attribute_Power(\"weapon-dice\");\n return \"\".join((\"(\", multiplier, \"*\", weapon_dice_count, \")*\", weapon_dice));", "def _critic(self):\n nactions = np.product(self.env.action_shape)\n action_input = keras.layers.Input(shape=(nactions,), name='action_input')\n obs_input = keras.layers.Input(shape=(1,) + self.env.observation_space.shape, name='observation_input')\n flattened_obs = keras.layers.Flatten()(obs_input)\n\n out = keras.layers.Concatenate()([action_input, flattened_obs])\n out = keras.layers.Dense(16)(out)\n out = keras.layers.Activation('relu')(out)\n out = keras.layers.Dense(8)(out)\n out = keras.layers.Activation('relu')(out)\n out = keras.layers.Dense(1)(out) # Must be single output\n out = keras.layers.Activation('linear')(out)\n critic = keras.models.Model(inputs=[action_input, obs_input], outputs=out)\n return critic, action_input", "def extract_critic_conditioning(self, data):\n return data[0]", "def criticize(self, env: FakeEnv) -> Tensor:\n c = Critique(env.observation)", "def Damage_Weapon_Mod_Crit(self, w_multiplier, damage_type=\"\"):\n damage_type = str(damage_type);\n if damage_type == \"\":\n return \"\".join((\"[[\", self.W_Crit(w_multiplier), \"+\", self.Attribute_Power(\"damage\"), \"]] damage.\"));\n else:\n return \"\".join((\"[[\", self.W_Crit(w_multiplier), \"+\", self.Attribute_Power(\"damage\"), \"]] \", damage_type, \" damage.\"));", "def getCondition(self):\r\n return self.controller.getCondition()", "def critical(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"critical\")", "def get(self, moraic_id=None):\n if moraic_id is not None:\n return self.morae.get(moraic_id)\n return self.morae", "def getCoefficient(self):\n return _libsbml.FluxObjective_getCoefficient(self)", "def getReaction(self):\n return _libsbml.FluxObjective_getReaction(self)", "def get_damage():\n\n return character['Damage']", "def 
get_action_critic(self, x): \n x = x.to(self.dummy_param.device)\n encoder_output, actor_output = self.get_penultimate(x)\n critic_output = self.critic_decoder(encoder_output)\n return actor_output, critic_output", "def check_for_crit_or_botch(self):\n return NaturalRollType.get_roll_type(self.raw_roll)", "def get_criterions(self, **kwargs):\n return self.get('criterions.json', **kwargs)", "def get_criteria(self):\n\n\t\treturn self.__criteria", "def getC(self):\n\t\treturn self.c", "def mission(self) -> Optional[Mission]:\n # XXX: This currently can be `None`\n return self._mission", "def military(self, instance):\r\n return instance.user.profile.military", "def get_cruft(self):\n\n raise computerjanitor.UnimplementedMethod(self.get_cruft)", "def critical_depth(self):\n crit_depth = math.pow((self.flow**2 /\n (self.width ** 2 * Channel.g)), (1/3))\n return crit_depth", "def criterion_type(self) -> str:\n return pulumi.get(self, \"criterion_type\")" ]
[ "0.6419532", "0.5767918", "0.56854165", "0.5523613", "0.54817516", "0.5477361", "0.541997", "0.5398939", "0.53664863", "0.5311079", "0.5240227", "0.52047384", "0.5190481", "0.5189437", "0.5118587", "0.50764984", "0.5053034", "0.50138736", "0.5009325", "0.49982783", "0.4973842", "0.4964071", "0.4933473", "0.4928684", "0.49095428", "0.49063444", "0.48852265", "0.48564887", "0.48554775", "0.48382735" ]
0.84475094
0
Set the critic of this actor. You probably do not want to do this manually.
def critic(self, critic: CriticType) -> None:
    self._critic = critic
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def critic(self) -> CriticType:\n if not self._critic:\n raise ValueError(\"no critic associated with this actor\")\n return self._critic", "def set_CriticsPick(self, value):\n super(SearchByReviewerInputSet, self)._set_input('CriticsPick', value)", "def criticize(self, env: FakeEnv) -> Tensor:\n c = Critique(env.observation)", "def set_risk(self, event):\n if not self.caller.check_permstring(\"builders\"):\n raise self.CalCmdError(\"Only GMs can set the risk of an event.\")\n try:\n risk = int(self.lhs)\n if risk > 10 or risk < 0:\n raise ValueError\n except (TypeError, ValueError):\n raise self.CalCmdError(\"Risk must be between 0 and 10.\")\n self.set_form_or_event_attribute(\"risk\", risk, event)\n self.msg(\"Risk is now set to: %s\" % risk)", "def set_actor_policy(self, actor_policy):\n raise NotImplementedError", "def setImmunity(self, immune):\n self._immune = immune", "def set_damage():\n\n global character\n character['Damage'] = randint(1, 6)", "def set_actor(self, vtk_act):\n assert isinstance(vtk_act, vtkActor)\n self.vtk_act = vtk_act", "def _setup_actor_critic_agent(self, ppo_cfg: Config) -> None:\n logger.add_filehandler(self.config.LOG_FILE)\n\n self.actor_critic = RearrangementBaselinePolicy(\n observation_space=self.envs.observation_spaces[0],\n action_space=self.envs.action_spaces[0],\n hidden_size=ppo_cfg.hidden_size,\n )\n self.actor_critic.to(self.device)\n\n self.agent = PPO(\n actor_critic=self.actor_critic,\n clip_param=ppo_cfg.clip_param,\n ppo_epoch=ppo_cfg.ppo_epoch,\n num_mini_batch=ppo_cfg.num_mini_batch,\n value_loss_coef=ppo_cfg.value_loss_coef,\n entropy_coef=ppo_cfg.entropy_coef,\n lr=ppo_cfg.lr,\n eps=ppo_cfg.eps,\n max_grad_norm=ppo_cfg.max_grad_norm,\n use_normalized_advantage=ppo_cfg.use_normalized_advantage,\n )", "def __init__(self, settings: ActorSettings) -> None:\n super().__init__()\n self.settings = settings\n\n self._critic = None", "def set_capacity(self, cap):\n return self.get_interaction().set_capacity(cap)", "def __init__(self,\n sess,\n ob_space,\n ac_space,\n co_space,\n buffer_size,\n batch_size,\n actor_lr,\n critic_lr,\n verbose,\n tau,\n gamma,\n use_huber,\n l2_penalty,\n model_params,\n num_envs=1):\n super(ActorCriticPolicy, self).__init__(\n sess=sess,\n ob_space=ob_space,\n ac_space=ac_space,\n co_space=co_space,\n verbose=verbose,\n l2_penalty=l2_penalty,\n model_params=model_params,\n num_envs=num_envs,\n )\n\n self.buffer_size = buffer_size\n self.batch_size = batch_size\n self.actor_lr = actor_lr\n self.critic_lr = critic_lr\n self.tau = tau\n self.gamma = gamma\n self.use_huber = use_huber", "def crit_ai(crit):\n if crit['type'] == 'crawler':\n # Crawlers move at random.\n return random.choice(['left','right','up','down'])\n #if crit['type'] == 'bullet':\n # return crit['dir']\n return None", "def set_criteria(self, criteria):\n\n\t\ttry:\n\t\t\tfrom zcrmsdk.src.com.zoho.crm.api.custom_views.criteria import Criteria\n\t\texcept Exception:\n\t\t\tfrom .criteria import Criteria\n\n\t\tif criteria is not None and not isinstance(criteria, Criteria):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: criteria EXPECTED TYPE: Criteria', None, None)\n\t\t\n\t\tself.__criteria = criteria\n\t\tself.__key_modified['criteria'] = 1", "def setCoefficient(self, *args):\n return _libsbml.FluxObjective_setCoefficient(self, *args)", "def set ( self, trait_change_notify = True, **traits ):\n if not trait_change_notify:\n self._trait_change_notify( False )\n try:\n for name, value in traits.items():\n setattr( self, name, value )\n 
finally:\n self._trait_change_notify( True )\n else:\n for name, value in traits.items():\n setattr( self, name, value )\n\n return self", "def mitigated(self, mitigated):\n\n self._mitigated = mitigated", "def liability(self, liability):\n\n self._liability = liability", "def setReaction(self, *args):\n return _libsbml.FluxObjective_setReaction(self, *args)", "def _Learn(self, Actor, ActorTarget, actorOpt, experiances):\n Actor.train() # Set in Train Mode\n # Get split experiances into: states, actions ...\n states, actions, rewards, nextStates, dones = experiances\n # ....................... Update Critic .......................\n QTargetsNext = self.CriticTarget(nextStates, ActorTarget(nextStates))\n QTargets = rewards + (GAMMA * QTargetsNext * (1 - dones))\n QExpected = self.Critic(states, actions)\n # Minimize Loss & Update Weights\n critic_loss = F.smooth_l1_loss(QExpected, QTargets.detach())\n self.criticOpt.zero_grad()\n critic_loss.backward()\n T.nn.utils.clip_grad_norm(self.Critic.parameters(), 1)\n self.criticOpt.step()\n # ....................... Update Actor .......................\n actor_loss = -self.Critic(states, Actor(states)).mean()\n # Update Weights\n actorOpt.zero_grad()\n actor_loss.backward()\n T.nn.utils.clip_grad_norm(Actor.parameters(), 1)\n actorOpt.step()\n # ............. Update Actor & Critic Target Nets .............\n self.SoftUpdate(self.Critic, self.CriticTarget, TAU)\n self.SoftUpdate(Actor, ActorTarget, TAU)", "def criteria_met(self, criteria_met):\n\n self._criteria_met = criteria_met", "def W_Crit(self, multiplier=1):\n multiplier = str(multiplier);\n weapon_dice_count = self.Attribute_Power(\"weapon-num-dice\");\n weapon_dice = self.Attribute_Power(\"weapon-dice\");\n return \"\".join((\"(\", multiplier, \"*\", weapon_dice_count, \")*\", weapon_dice));", "def impact(self, impact):\n if impact is None:\n raise ValueError(\"Invalid value for `impact`, must not be `None`\") # noqa: E501\n if impact is not None and len(impact) < 1:\n raise ValueError(\"Invalid value for `impact`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._impact = impact", "def set_option(self, optname, value, action=None, optdict=None):\n if optname == 'max-complexity':\n self.max_complexity = int(value)", "def tower_damage(self, tower_damage):\n\n self._tower_damage = tower_damage", "def set_occupant(self):\n\t\tself.occupant = 1", "def _critic(self, image):\n return torch.mean(self.critic(image))", "def cargo_fuel(self, cargo_fuel):\n\n self._cargo_fuel = cargo_fuel", "def SetRestriction(self, restrict_type):\n return _hypre.HypreBoomerAMG_SetRestriction(self, restrict_type)", "def set_experience(self):\n if self.__experience < 50:\n self.__experience += 1" ]
[ "0.6765269", "0.55871475", "0.53288627", "0.5272914", "0.5172377", "0.51469475", "0.51087296", "0.5046028", "0.50317234", "0.4984842", "0.4978813", "0.49477944", "0.49282527", "0.49237365", "0.49106264", "0.48933023", "0.48489824", "0.4830805", "0.48293337", "0.4826514", "0.48117658", "0.48101074", "0.47895673", "0.47769356", "0.47711343", "0.47695175", "0.47524437", "0.47359502", "0.4730809", "0.47264597" ]
0.8091018
0
Generate policy parameters on-the-fly based on an environment state.
def _gen_policy_params(self, state: State) -> Tensor: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _gen_policy_params(self, state: State) -> Tensor:\n return self.network(state)", "def build_policy(env, Vvalues):\n Vpolicy = torch.zeros(NUM_STATES)\n\n for state in range(NUM_STATES):\n _, index = next_step_evaluation(env, state, Vvalues)\n Vpolicy[state] = index.item()\n\n return Vpolicy", "def getPolicy(self, state):\n util.raiseNotDefined()", "def getPolicy(self, state):\n util.raiseNotDefined()", "def calculate_policy(self, state):\n # short aliases\n s = state # s stands for state\n g = self.config['gamma'] # g stands for gamma\n n = self.action_space.n # n stands for the number of actions\n a = self.config['alpha']\n pi_s = self.policy[state] # pi_s stands for the policy in state s\n weights = self.weights[state]\n # print(weights)\n\n\n # obtains the probability vector from Hedge: p_i(t) = (1+alpha)^s_i(t) / sum_{j \\in K} (1+alpha)^s_j(t)\n sum_weights_exponentials = sum([(1 + a) ** w for w in weights])\n pre_prob = [(((1 + a) ** w) / sum_weights_exponentials) for w in weights]\n\n # the policy is a probability vector, giving the probability of each action\n pi_s = [((1 - g) * p) + (g / n) for p in pre_prob]\n\n return pi_s", "def calculate_policy(self, state):\n # short aliases\n s = state # s stands for state\n g = self.config['gamma'] # g stands for gamma\n n = self.action_space.n # n stands for the number of actions\n pi_s = self.policy[state] # pi_s stands for the policy in state s\n\n sum_weights = sum(self.weights[s])\n\n # the policy is a probability vector, giving the probability of each action\n pi_s = [((1 - g) * w / sum_weights) + (g / n) for w in self.weights[s]]\n # print(state, pi_s)\n return pi_s", "def fixture_allparams():\n clp = Policy()\n return clp.metadata()", "def make_PPO2(env_name):\n env = util.make_vec_env(env_name, 8)\n # Didn't look at rl-baselines-zoo for this, but these hyperparameters\n # seem ok. 
They aren't great though.\n policy = stable_baselines.PPO2(util.FeedForward32Policy, env,\n verbose=0, tensorboard_log=\"output/\",\n learning_rate=3e-3,\n nminibatches=32,\n noptepochs=10,\n n_steps=2048)\n return policy", "def policy_iteration_on_secret_env1() -> PolicyAndValueFunction:\n result = get_policy_iteration(secret_env, 0.99, 0.01)\n export_to_json(result.pi, 'policy_iteration_secret_env_1')\n return result", "def generate_params(self, randomize=True):\n pass", "def _prepare_policy_input(self, state, legal_actions, step_rewards=None):\n check_for_nans(\"Raw state\", state)\n state_ = state.view(-1)\n\n if step_rewards is None or not step_rewards:\n step_rewards = [None for _ in legal_actions]\n batch_states = []\n\n assert legal_actions\n assert step_rewards\n assert len(legal_actions) == len(step_rewards)\n\n for action, log_likelihood in zip(legal_actions, step_rewards):\n action_ = self.action_factor * torch.tensor([action]).to(self.device, self.dtype)\n\n i, j = self.env.unwrap_action(action)\n pi = state[i, :]\n pj = state[j, :]\n check_for_nans(\"Individual momenta\", pi, pj)\n\n if self.log_likelihood_feature:\n if log_likelihood is None:\n log_likelihood = self._parse_action(action, from_which_env=\"real\")\n if not np.isfinite(log_likelihood):\n log_likelihood = 0.0\n log_likelihood = np.clip(log_likelihood, self.reward_range[0], self.reward_range[1])\n log_likelihood_ = self.log_likelihood_factor * torch.tensor([log_likelihood]).to(\n self.device, self.dtype\n )\n check_for_nans(\"Log likelihood as policy input\", log_likelihood_)\n\n combined_state = torch.cat((action_, pi, pj, log_likelihood_, state_), dim=0)\n check_for_nans(\"Individual policy input entry\", combined_state)\n else:\n combined_state = torch.cat((action_, pi, pj, state_), dim=0)\n check_for_nans(\"Individual policy input entry\", combined_state)\n\n batch_states.append(combined_state.unsqueeze(0))\n\n batch_states = torch.cat(batch_states, dim=0)\n check_for_nans(\"Concatenated policy input\", batch_states)\n return batch_states", "def _instantiate_parameter_states(self, context=None):\n\n from PsyNeuLink.Components.States.ParameterState import _instantiate_parameter_states\n _instantiate_parameter_states(owner=self, context=context)", "def extract_policy(env, v, gamma = 1.0):\n number_of_states = env.unwrapped.nS\n\n policy = np.zeros(number_of_states)\n for s in range(number_of_states):\n q_sa = np.zeros(env.action_space.n)\n for a in range(env.action_space.n):\n for next_sr in env.unwrapped.P[s][a]:\n # next_sr is a tuple of (probability, next state, reward, done)\n p, s_, r, _ = next_sr\n q_sa[a] += (p * (r + gamma * v[s_]))\n policy[s] = np.argmax(q_sa)\n return policy", "def create_params():\n\n params = {\n # Optimizer parameters (for Adam)\n \"beta1\": 0.9,\n \"beta2\": 0.999,\n \"epsilon\": 1e-7,\n \"learning_rate\": 0.001,\n\n # Input pipeline parameters\n \"parallel_reads\": 8, # Number of parallel file\n # readers per host.\n \"train_dataset_path\": FLAGS.train_dataset_path, # Glob specifing TFRecord\n # files with tf.examples.\n \"eval_dataset_path\": FLAGS.eval_dataset_path, # Glob specifing TFRecord\n # files with tf.examples.\n\n # Training paramaeters\n \"global_batch_size\": 512, # Global batch size for training.\n \"eval_global_batch_size\": 512, # Global batch size for eval.\n \"train_epochs\": 5, # Number of times to run train/eval loop.\n \"steps_per_epoch\": 100, # Number of training steps per epoch.\n \"num_eval_steps\": 10, # Number of eval steps per epoch\n\n # TPU 
parameters\n \"gcp_project\": FLAGS.gcp_project, # Project TPU is in.\n \"tpu_zone\": FLAGS.tpu_zone, # GCE zone the TPU is in.\n \"tpu\": FLAGS.tpu, # Name of the TPU.\n \"iterations_per_loop\": 200, # Number of iterations per device\n # training loop.\n \"pipeline_execution\": False, # If True, speed up training by\n # overlaping embedding lookups with\n # dense layer computations. Embedding\n # lookups will be one step old.\n \"use_gradient_accumulation\": True, # If False, speed up training by\n # applying embedding optimizer in\n # batches smaller than global batch\n # size.\n \"use_tpu\": True, # If False, uses CPU to train.\n\n # Model parameters\n \"model_dir\": FLAGS.model_dir, # Directory in which to store checkpoints.\n \"model_layers\": [100, 75, 50], # Sizes of dense layers for model\n \"num_categories\": 10, # Number of output categories.\n \"table_1_dimension\": 128, # Dimension of embedding table 1.\n \"table_1_rows\": 100, # Number of feature values in table 1.\n \"table_2_dimension\": 256, # Dimension of embedding table 2.\n \"table_2_rows\": 1000, # Number of feature values in table 2.\n }\n\n tf.logging.info(\"Params: {}\".format(params))\n\n return params", "def getPolicy(self, state):\n return self.policy[state]", "def policy_parameters(self) -> Optional[pulumi.Input['PolicyParametersArgs']]:\n return pulumi.get(self, \"policy_parameters\")", "def policy_parameters(self) -> Optional[pulumi.Input['PolicyParametersArgs']]:\n return pulumi.get(self, \"policy_parameters\")", "def _setup():\n\n # Set random seeds\n tf.set_random_seed(CONFIG.seed)\n np.random.seed(CONFIG.seed)\n\n gs_env = tf.Variable(0, trainable=False, name='global_step_env')\n inc_gs = tf.assign_add(gs_env, 1)\n\n # Make the gym environment\n if CONFIG.env == 'mnist':\n env = MNIST()\n elif CONFIG.env == 'binary':\n env = BinaryClassifier(done_every=CONFIG.ep_len)\n else:\n raise ValueError('Do not recognize environment ', CONFIG.env)\n\n discrete = isinstance(env.action_space, gym.spaces.Discrete)\n\n # Observation and action sizes\n ob_dim = env.observation_space.shape[0]\n ac_dim = env.action_space.n if discrete else env.action_space.shape[0]\n\n policy_net = PolicyEstimator(ob_dim, ac_dim, gs_env)\n value_net = ValueEstimator(ob_dim)\n\n return env, inc_gs, policy_net, value_net", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['strategy'] = self.strategy\n paramDict['nPoints'] = self.nPoints\n return paramDict", "def init_envs():\n myenv = env(\n Ndim=N_DIMS,\n lambda_over_dx=LAMBDA_OVER_DX,\n R_dt=R_DT,\n norm_Poisson=NORM_POISSON,\n Ngrid=N_GRID,\n Nhits=N_HITS,\n )\n if POLICY == -1:\n mymodel = reload_model(MODEL_PATH, inputshape=myenv.NN_input_shape)\n mypol = RLPolicy(\n env=myenv,\n model=mymodel,\n )\n else:\n mypol = HeuristicPolicy(\n env=myenv,\n policy=POLICY,\n steps_ahead=STEPS_AHEAD,\n )\n return myenv, mypol", "def _generate_params(self):\n return {\n 'lis_outcome_service_url': self.lis_outcome_service_url,\n 'lis_result_sourcedid': self.lis_result_sourcedid,\n 'oauth_consumer_key': self.key\n }", "def extract_policy(env, v, gamma = 1.0):\n policy = np.zeros(env.nS)\n for s in range(env.nS):\n q_sa = np.zeros(env.action_space.n)\n for a in range(env.action_space.n):\n for next_sr in env.P[s][a]:\n # next_sr is a tuple of (probability, next state, reward, done)\n p, s_, r, _ = next_sr\n q_sa[a] += (p * (r + gamma * v[s_]))\n policy[s] = np.argmax(q_sa)\n return policy", "def create_policy(env, policy_type, policy_weights_file=None):\n input_size = 
env.observation_space.shape[0]\n output_size = env.action_space.shape[0]\n action_low = env.action_space.low\n action_high = env.action_space.high\n policy = policy_type(input_size=input_size,\n output_size=output_size,\n action_high=action_high,\n action_low=action_low)\n if policy_weights_file:\n policy.load_model(policy_weights_file)\n return policy", "def hyperparameters(self):\n hyperparameters = super(TensorFlow, self).hyperparameters()\n\n self.checkpoint_path = self.checkpoint_path or self._default_s3_path('checkpoints')\n mpi_enabled = False\n\n if self._script_mode_enabled():\n additional_hyperparameters = {}\n\n if 'parameter_server' in self.distributions:\n ps_enabled = self.distributions['parameter_server'].get('enabled', False)\n additional_hyperparameters[self.LAUNCH_PS_ENV_NAME] = ps_enabled\n\n if 'mpi' in self.distributions:\n mpi_dict = self.distributions['mpi']\n mpi_enabled = mpi_dict.get('enabled', False)\n additional_hyperparameters[self.LAUNCH_MPI_ENV_NAME] = mpi_enabled\n additional_hyperparameters[self.MPI_NUM_PROCESSES_PER_HOST] = mpi_dict.get('processes_per_host', 1)\n additional_hyperparameters[self.MPI_CUSTOM_MPI_OPTIONS] = mpi_dict.get('custom_mpi_options', '')\n\n self.model_dir = self.model_dir or self._default_s3_path('model', mpi=mpi_enabled)\n additional_hyperparameters['model_dir'] = self.model_dir\n else:\n additional_hyperparameters = {'checkpoint_path': self.checkpoint_path,\n 'training_steps': self.training_steps,\n 'evaluation_steps': self.evaluation_steps,\n 'sagemaker_requirements': self.requirements_file}\n\n hyperparameters.update(Framework._json_encode_hyperparameters(additional_hyperparameters))\n return hyperparameters", "def get_policies_default(config, n_policies, obs_space, act_space, policy_template=\"player_%d\"):\r\n policies = {policy_template % i: get_agent_config(agent_id=i, which=config['_policies'][i],\r\n config=config,\r\n obs_space=obs_space, act_space=act_space)\r\n for i in range(1, 1 + n_policies)}\r\n return policies", "def build_trainer_config(config):\r\n # determining environment parameters\r\n env_fcn = config['_env_fcn']\r\n env = env_fcn(config['_env'])\r\n obs_space, act_space, n_policies = env.observation_space, env.action_space, env.n_policies\r\n env.close()\r\n\r\n policies = config['_get_policies'](config=config, n_policies=n_policies, obs_space=obs_space, act_space=act_space)\r\n select_policy = config['_select_policy']\r\n\r\n config = deepcopy(config)\r\n config['_all_policies'] = sorted(policies.keys())\r\n\r\n if config['_update_withpolicies'] and '_iteration' in config:\r\n config = config['_update_withpolicies'](config, iteration=config['_iteration'])\r\n\r\n config1 = deepcopy(config)\r\n config1['multiagent'] = {}\r\n config1['multiagent']['policies'] = policies\r\n\r\n for k in config['_train_policies']:\r\n assert k in policies.keys(), f\"Unknown policy {k} [range {policies.keys()}]\"\r\n\r\n rl_config = {\r\n \"env\": config['_env_name_rllib'],\r\n \"env_config\": config['_env'],\r\n \"multiagent\": {\r\n \"policies_to_train\": config['_train_policies'],\r\n \"policies\": policies,\r\n \"policy_mapping_fn\": partial(select_policy, config=config1),\r\n },\r\n 'tf_session_args': {'intra_op_parallelism_threads': config['_num_workers_tf'],\r\n 'inter_op_parallelism_threads': config['_num_workers_tf'],\r\n 'gpu_options': {'allow_growth': True},\r\n 'log_device_placement': True,\r\n 'device_count': {'CPU': config['_num_workers_tf']},\r\n 'allow_soft_placement': True\r\n },\r\n \"local_tf_session_args\": 
{\r\n \"intra_op_parallelism_threads\": config['_num_workers_tf'],\r\n \"inter_op_parallelism_threads\": config['_num_workers_tf'],\r\n },\r\n }\r\n\r\n # filling in the rest of variables\r\n for k, v in config.items():\r\n if k.startswith('_'): continue\r\n rl_config[k] = v\r\n\r\n if config.get('_verbose', True):\r\n print(\"Config:\")\r\n print(pretty_print(rl_config))\r\n\r\n if config['_trainer'] == 'External' and '_tmp_dir' in config:\r\n rl_config['tmp_dir'] = config['_tmp_dir']\r\n \r\n for key, val in rl_config.items():\r\n if isinstance(val, Domain):\r\n sampled_val = val.sample()\r\n rl_config[key] = sampled_val\r\n logging.warning(f\"Trainer got a ray.tune.sample for parameter {key}: {type(val)}({val}). Replacing it with a sampled value {sampled_val}\")\r\n\r\n return rl_config", "def generate_ideal(self):\n return StageParameters(self, *self._ideal_values())", "def init_params():\n p = {}\n \n # p['rootFolder'] = 'C:/Users/Umberto Gostoli/SPHSU/Social Care Model II'\n # p['rootFolder'] = 'N:/Social Care Model Paper III'\n \n p['noPolicySim'] = False\n p['multiprocessing'] = True\n p['numberProcessors'] = 9\n p['numRepeats'] = 3\n \n p['startYear'] = 1860\n p['endYear'] = 2040\n p['thePresent'] = 2012\n p['statsCollectFrom'] = 1990\n p['regressionCollectFrom'] = 1960 \n p['implementPoliciesFromYear'] = 2020\n p['yearOutcome'] = 2015\n \n p['favouriteSeed'] = 123\n p['loadFromFile'] = False\n p['verboseDebugging'] = False\n p['singleRunGraphs'] = False\n p['saveChecks'] = True\n p['getCheckVariablesAtYear'] = 2015\n # To change through command-line arguments\n\n p['numberPolicyParameters'] = 2\n p['valuesPerParam'] = 1\n p['numberScenarios'] = 3\n \n ############ Policy Parameters #######################\n p['incomeCareParam'] = 0.0005 #[0.00025 - 0.001]\n p['taxBreakRate'] = 0.0\n p['ageOfRetirement'] = 65\n p['socialSupportLevel'] = 5\n # p['educationCosts']\n #############################################################\n p['socialCareCreditShare'] = 0.0\n p['maxWtWChildAge'] = 5\n # The basics: starting population and year, etc.\n \n p['discountingFactor'] = 0.03\n \n \n p['initialPop'] = 600 \n \n p['minStartAge'] = 24\n p['maxStartAge'] = 45\n p['numberClasses'] = 5\n p['socialClasses'] = ['unskilled', 'skilled', 'lower', 'middle', 'upper']\n p['initialClassShares'] = [0.2, 0.25, 0.3, 0.2, 0.05]\n p['initialUnemployment'] = [0.25, 0.2, 0.15, 0.1, 0.1]\n p['unemploymentAgeBandParam'] = 0.3\n \n # doDeath function parameters\n p['mortalityBias'] = 0.85 # After 1950\n p['careNeedBias'] = 0.9\n p['unmetCareNeedBias'] = 0.5\n p['baseDieProb'] = 0.0001\n p['babyDieProb'] = 0.005\n p['maleAgeScaling'] = 14.0\n p['maleAgeDieProb'] = 0.00021\n p['femaleAgeScaling'] = 15.5\n p['femaleAgeDieProb'] = 0.00019\n \n p['orphansRelocationParam'] = 0.5\n \n # doBirths function parameters\n p['minPregnancyAge'] = 17\n p['maxPregnancyAge'] = 42\n p['growingPopBirthProb'] = 0.215\n p['fertilityCorrector'] = 1.0\n p['fertilityBias'] = 0.9\n \n # careTransitions function parameters\n p['zeroYearCare'] = 80.0\n p['childcareDecreaseRate'] = 0.25\n p['personCareProb'] = 0.0008\n p['maleAgeCareScaling'] = 18.0 # p['maleAgeCareProb'] = 0.0008\n p['femaleAgeCareScaling'] = 19.0 # p['femaleAgeCareProb'] = 0.0008\n p['baseCareProb'] = 0.0002\n p['careBias'] = 0.9\n p['careTransitionRate'] = 0.7\n\n p['unmetNeedExponent'] = 1.0 # 0.005 #[0.005 - 0.02]\n \n p['numCareLevels'] = 5\n p['careLevelNames'] = ['none','low','moderate','substantial','critical']\n p['careDemandInHours'] = [ 0.0, 8.0, 16.0, 32.0, 
80.0 ]\n p['quantumCare'] = 4.0\n \n # careSupplies getCare and probSuppliers function parameters\n \n ######## Key parameter 1 ##############\n \n \n p['weeklyHours'] = 40.0\n \n \n p['priceChildCare'] = 0.76 # 6 \n p['schoolAge'] = 5\n p['maxFormalChildcareHours'] = 48\n p['schoolHours'] = 30\n p['freeChildcareHours'] = 15\n p['workingParentsFreeChildcareHours'] = 30\n p['minAgeStartChildCareSupport'] = 3\n p['minAgeStartChildCareSupportByIncome'] = 2\n p['maxHouseholdIncomeChildCareSupport'] = 40 # 320\n \n ######## Key parameter 2 ##############\n # 5: No public supply \n \n p['retiredHours'] = [48.0, 36.0, 20.0, 10.0] # 60.0\n p['studentHours'] = [24.0, 16.0, 8.0, 4.0]\n p['teenAgersHours'] = [16.0, 0.0, 0.0, 0.0]\n p['unemployedHours'] = [32.0, 24.0, 16.0, 8.0]\n p['employedHours'] = [28.0, 20.0, 12.0, 8.0]\n p['formalCareDiscountFactor'] = 0.5\n \n p['socialNetworkDistances'] = [0.0, 1.0, 2.0, 1.0, 2.0, 2.0, 3.0, 3.0]\n p['networkDistanceParam'] = 2.0\n p['socialCareWeightBias'] = 1.0\n p['unmetCareNeedDiscountParam'] = 0.5\n p['shareUnmetNeedDiscountParam'] = 0.5\n # p['pastShareUnmetNeedWeight'] = 0.5\n \n \n \n p['networkSizeParam'] = 10.0 # 1.0\n \n p['careSupplyBias'] = 0.5\n p['careIncomeParam'] = 0.001\n \n # Hospitalization Costs\n p['qalyBeta'] = 0.18\n p['qalyAlpha'] = 1.5\n p['qalyDiscountRate'] = 0.035\n p['qalyIndexes'] = [1.0, 0.8, 0.6, 0.4, 0.2]\n p['unmetCareHealthParam'] = 0.1\n p['hospitalizationParam'] = 0.5\n p['needLevelParam'] = 2.0\n p['unmetSocialCareParam'] = 2.0\n p['costHospitalizationPerDay'] = 400\n \n # ageTransitions, enterWorkForce and marketWage functions parameters\n p['ageTeenagers'] = 12\n p['minWorkingAge'] = 16\n \n ######## Key parameter 3 ##############\n \n p['careBankingSchemeOn'] = False\n p['socialCareBankingAge'] = 65\n \n p['absoluteCreditQuantity'] = False\n p['quantityYearlyIncrease'] = 0.0\n p['socialCareCreditQuantity'] = 0\n p['kinshipNetworkCarePropension'] = 0.5\n p['volunteersCarePropensionCoefficient'] = 0.01\n p['pensionContributionRate'] = 0.05\n \n p['hillHealthLevelThreshold'] = 3\n p['seriouslyHillSupportRate'] = 0.5\n \n ### Prices ####\n p['pricePublicSocialCare'] = 20.0 # [2.55] # 20\n p['priceSocialCare'] = 17.0 # [2.29] # 18\n p['taxBrackets'] = [663, 228, 0] # [28.16, 110.23] # [221, 865]\n p['taxBandsNumber'] = 3\n p['bandsTaxationRates'] = [0.4, 0.2, 0.0] # [0.0, 0.2, 0.4]\n # Tax Break Policy\n\n \n p['pensionWage'] = [5.0, 7.0, 10.0, 13.0, 18.0] # [0.64, 0.89, 1.27, 1.66, 2.29] # \n p['incomeInitialLevels'] = [5.0, 7.0, 9.0, 11.0, 14.0] #[0.64, 0.89, 1.15, 1.40, 1.78] # \n p['incomeFinalLevels'] = [10.0, 15.0, 22.0, 33.0, 50.0] #[1.27, 1.91, 2.80, 4.21, 6.37] # \n p['educationCosts'] = [0.0, 100.0, 150.0, 200.0] #[0.0, 12.74, 19.12, 25.49] # \n \n # Priced growth #####\n p['wageGrowthRate'] = 1.0 # 1.01338 # \n\n p['incomeGrowthRate'] = [0.4, 0.35, 0.35, 0.3, 0.25]\n \n # SES inter-generational mobility parameters\n p['leaveHomeStudentsProb'] = 0.5\n \n p['eduWageSensitivity'] = 0.2 # 0.5\n p['eduRankSensitivity'] = 3.0 # 5.0\n p['costantIncomeParam'] = 80.0 # 20.0\n p['costantEduParam'] = 10.0 # 10.0\n p['careEducationParam'] = 0.005 # 0.04\n \n \n \n # p['incEduExp'] = 0.25\n p['educationLevels'] = ['GCSE', 'A-Level', 'HND', 'Degree', 'Higher Degree']\n p['workingAge'] = [16, 18, 20, 22, 24]\n \n # doDivorce function parameters\n p['basicDivorceRate'] = 0.06\n p['variableDivorce'] = 0.06\n p['divorceModifierByDecade'] = [ 0.0, 1.0, 0.9, 0.5, 0.4, 0.2, 0.1, 0.03, 0.01, 0.001, 0.001, 0.001, 0.0, 0.0, 0.0, 0.0, 0.0 
]\n p['divorceBias'] = 1.0\n \n # doMarriages function parameters\n p['deltageProb'] = [0.0, 0.1, 0.25, 0.4, 0.2, 0.05]\n p['incomeMarriageParam'] = 0.025\n p['studentFactorParam'] = 0.5\n ######## Key parameter 4 ##############\n p['betaGeoExp'] = 2.0 #[1.0 - 4.0]\n \n p['betaSocExp'] = 2.0\n p['rankGenderBias'] = 0.5\n p['basicMaleMarriageProb'] = 0.9\n p['maleMarriageModifierByDecade'] = [ 0.0, 0.16, 0.5, 1.0, 0.8, 0.7, 0.66, 0.5, 0.4, 0.2, 0.1, 0.05, 0.01, 0.0, 0.0, 0.0, 0.0 ]\n \n # jobMarket, updateWork and unemploymentRate functions parameters\n p['unemploymentClassBias'] = 0.75\n p['unemploymentAgeBias'] = [1.0, 0.55, 0.35, 0.25, 0.2, 0.2]\n p['numberAgeBands'] = 6\n p['jobMobilitySlope'] = 0.004\n p['jobMobilityIntercept'] = 0.05\n p['ageBiasParam'] = [7.0, 3.0, 1.0, 0.5, 0.35, 0.15]\n p['deltaIncomeExp'] = 0.05\n p['unemployedCareBurdernParam'] = 0.025\n # Potential key parameter\n p['relocationCareLossExp'] = 1.0 # 40.0 # \n p['incomeSocialCostRelativeWeight'] = 0.5\n \n p['firingParam'] = 0.2\n p['wageVar'] = 0.06\n p['workDiscountingTime'] = 0.75 # 0.8\n p['sizeWeightParam'] = 0.7\n p['minClassWeightParam'] = 1.0\n p['incomeDiscountingExponent'] = 4.0\n p['discountingMultiplier'] = 2.0\n #p['incomeDiscountingParam'] = 2.0\n \n # relocationPensioners function parameters\n p['agingParentsMoveInWithKids'] = 0.1\n p['variableMoveBack'] = 0.1\n p['retiredRelocationParam'] = 0.001 # 0.005\n \n # houseMap function parameters\n p['geoDistanceSensitivityParam'] = 2.0\n p['socDistanceSensitivityParam'] = 2.0\n p['classAffinityWeight'] = 4.0\n p['distanceSensitivityParam'] = 0.5\n \n # relocationProb function parameters\n p['baseRelocatingProb'] = 0.05\n p['relocationParameter'] = 1.0 \n p['apprenticesRelocationProb'] = 0.5\n #p['expReloc'] = 1.0\n \n # computeRelocationCost and relocation Propensity functions parameters\n p['yearsInTownSensitivityParam'] = 0.5\n \n ######## Key parameter 5 ##############\n p['relocationCostParam'] = 0.5 # 1.0 \n \n ######## Key parameter 6 ##############\n p['propensityRelocationParam'] = 2.0 # 2.0 \n p['denRelocationWeight'] = 0.5\n \n \n ## Description of the map, towns, and houses\n p['mapGridXDimension'] = 8\n p['mapGridYDimension'] = 12 \n p['townGridDimension'] = 70\n p['cdfHouseClasses'] = [ 0.6, 0.9, 5.0 ]\n p['ukMap'] = [[ 0.0, 0.1, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.1, 0.1, 0.2, 0.2, 0.3, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.2, 0.2, 0.3, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.2, 1.0, 0.5, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.4, 0.0, 0.2, 0.2, 0.4, 0.0, 0.0, 0.0 ],\n [ 0.6, 0.0, 0.0, 0.3, 0.8, 0.2, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.0, 0.6, 0.8, 0.4, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.2, 1.0, 0.8, 0.6, 0.1, 0.0 ],\n [ 0.0, 0.0, 0.1, 0.2, 1.0, 0.6, 0.3, 0.4 ],\n [ 0.0, 0.0, 0.5, 0.7, 0.5, 1.0, 1.0, 0.0 ],\n [ 0.0, 0.0, 0.2, 0.4, 0.6, 1.0, 1.0, 0.0 ],\n [ 0.0, 0.2, 0.3, 0.0, 0.0, 0.0, 0.0, 0.0 ]]\n p['ukClassBias'] = [[ 0.0, -0.05, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],\n [ -0.05, -0.05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, -0.05, -0.05, 0.05, 0.0, 0.0, 0.0, 0.0 ],\n [ -0.05, 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],\n [ -0.05, 0.0, 0.0, -0.05, -0.05, -0.05, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.0, -0.05, -0.05, -0.05, 0.0, 0.0 ],\n [ 0.0, 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.0, -0.05, 0.0, -0.05, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.0, -0.05, 0.0, 0.2, 0.15, 0.0 ],\n [ 0.0, 0.0, 0.0, 0.0, 0.1, 0.2, 0.15, 0.0 ],\n [ 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0 ] ]\n p['mapDensityModifier'] = 0.6\n # p['numHouseClasses'] = 3\n # 
p['houseClasses'] = ['small','medium','large']\n \n ## Graphical interface details\n p['interactiveGraphics'] = False #True\n p['delayTime'] = 0.0\n p['screenWidth'] = 1300\n p['screenHeight'] = 700\n p['bgColour'] = 'black'\n p['mainFont'] = 'Helvetica 18'\n p['fontColour'] = 'white'\n p['dateX'] = 70\n p['dateY'] = 20\n p['popX'] = 70\n p['popY'] = 50\n p['pixelsInPopPyramid'] = 2000\n p['num5YearAgeClasses'] = 28\n p['careLevelColour'] = ['blue','green','yellow','orange','red']\n p['houseSizeColour'] = ['brown','purple','yellow']\n p['pixelsPerTown'] = 56\n p['maxTextUpdateList'] = 22\n \n # p['eduEduSensitivity'] = 0.5\n # p['mortalityBias'] = [1.0, 0.92, 0.84, 0.76, 0.68]\n # p['fertilityBias'] = [1.0, 0.92, 0.84, 0.76, 0.68]\n # p['divorceBias'] = [2.0, 1.5, 1.0, 0.75, 0.5]\n\n ## Transitions to care statistics\n \n ## Availability of care statistics\n \n #p['childHours'] = 5.0\n # p['employedHours'] = 12.0\n #p['homeAdultHours'] = 30.0\n #p['workingAdultHours'] = 25.0\n #p['maxEmployedHours'] = 60.0\n \n #p['lowCareHandicap'] = 0.5\n #p['hourlyCostOfCare'] = 20.0\n \n ## Fertility statistics\n \n # p['steadyPopBirthProb'] = 0.13\n # p['transitionYear'] = 1965\n \n ## Class and employment statistics\n # p['numClasses'] = 5\n # p['occupationClasses'] = ['lower','intermediate','higher']\n # p['cdfOccupationClasses'] = [ 0.6, 0.9, 1.0 ]\n\n ## Age transition statistics\n # p['ageOfAdulthood'] = 17\n \n ## Marriage function parameters\n \n # p['basicFemaleMarriageProb'] = 0.25\n # p['femaleMarriageModifierByDecade'] = [ 0.0, 0.5, 1.0, 1.0, 1.0, 0.6, 0.5, 0.4, 0.1, 0.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n # p['femaleMarriageProb'] = [0.01, 0.15, 0.3, 0.2, 0.1, 0.1, 0.06, 0.05, 0.02, 0.01, 0.01, 0.005]\n # p['maleMarriageProb'] = [0.005, 0.08, 0.25, 0.25, 0.15, 0.1, 0.07, 0.05, 0.03, 0.02, 0.01, 0.005]\n \n ## Leaving home and moving around statistics\n # p['probApartWillMoveTogether'] = 0.3\n # p['coupleMovesToExistingHousehold'] = 0.3\n # p['basicProbAdultMoveOut'] = 0.22\n # p['probAdultMoveOutModifierByDecade'] = [ 0.0, 0.2, 1.0, 0.6, 0.3, 0.15, 0.03, 0.03, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n # p['basicProbSingleMove'] = 0.05\n # p['probSingleMoveModifierByDecade'] = [ 0.0, 1.0, 1.0, 0.8, 0.4, 0.06, 0.04, 0.02, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n # p['basicProbFamilyMove'] = 0.03\n # p['probFamilyMoveModifierByDecade'] = [ 0.0, 0.5, 0.8, 0.5, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ]\n\n \n return p", "def get_prob_params():\n prob = Namespace()\n prob.study_name = STUDY_NAME\n if IS_DEBUG:\n prob.num_trials = 3\n prob.max_capital = 10\n else:\n prob.num_trials = NUM_TRIALS\n prob.max_capital = MAX_CAPITAL\n # Common\n prob.time_distro = TIME_DISTRO\n prob.num_workers = NUM_WORKERS\n _study_params = {\n 'branin': ('synthetic/branin/config_mf.json',\n branin_mf, cost_branin_mf, 0.1, 0, 1),\n 'hartmann3_2': ('synthetic/hartmann3_2/config_mf.json',\n hartmann3_2_mf, cost_hartmann3_2_mf, 0.1, 0, 1),\n 'hartmann6_4': ('synthetic/hartmann6_4/config_mf.json',\n hartmann6_4_mf, cost_hartmann6_4_mf, 0.1, 0, 1),\n 'borehole_6': ('synthetic/borehole_6/config_mf.json',\n borehole_6_mf, cost_borehole_6_mf, 1, 0, 1),\n 'park2_4': ('synthetic/park2_4/config_mf.json',\n park2_4_mf, cost_park2_4_mf, 0.3, 0, 1),\n 'park2_3': ('synthetic/park2_3/config_mf.json',\n park2_3_mf, cost_park2_3_mf, 0.1, 0, 1),\n 'park1_3': ('synthetic/park1_3/config_mf.json',\n park1_3_mf, cost_park1_3_mf, 0.5, 0, 1),\n }\n (domain_config_file_suffix, raw_func, raw_fidel_cost_func, 
_fc_noise_scale,\n _initial_pool_size, _) = _study_params[prob.study_name]\n domain_config_file = os.path.join(DRAGONFLY_EXPERIMENTS_DIR, domain_config_file_suffix)\n # noisy\n prob.noisy_evals = NOISY_EVALS\n if NOISY_EVALS:\n noise_type = 'gauss'\n noise_scale = _fc_noise_scale\n else:\n noise_type = 'no_noise'\n noise_scale = None\n # Create domain, function_caller and worker_manager\n config = load_config_file(domain_config_file)\n func_caller = get_multifunction_caller_from_config(raw_func, config,\n raw_fidel_cost_func=raw_fidel_cost_func, noise_type=noise_type,\n noise_scale=noise_scale)\n # Set max_capital\n if hasattr(func_caller, 'fidel_cost_func'):\n prob.max_capital = prob.max_capital * \\\n func_caller.fidel_cost_func(func_caller.fidel_to_opt)\n else:\n prob.max_capital = prob.max_capital\n # Store everything in prob\n prob.func_caller = func_caller\n prob.worker_manager = SyntheticWorkerManager(prob.num_workers,\n time_distro='caller_eval_cost')\n prob.save_file_prefix = prob.study_name + ('-debug' if IS_DEBUG else '')\n prob.methods = METHODS\n prob.save_results_dir = SAVE_RESULTS_DIR\n prob.reporter = get_reporter('default')\n # evaluation options\n prob.evaluation_options = Namespace(prev_eval_points='none',\n initial_pool_size=_initial_pool_size)\n return prob", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['p'] = self.p\n return paramDict" ]
[ "0.6997905", "0.6333678", "0.6001725", "0.6001725", "0.58902913", "0.5871921", "0.58525336", "0.57313406", "0.5689953", "0.56655127", "0.56550616", "0.5647602", "0.5634601", "0.5625977", "0.5622746", "0.55883676", "0.55883676", "0.55755997", "0.5521223", "0.5510471", "0.5508604", "0.5501932", "0.55018175", "0.5497828", "0.54825854", "0.5461259", "0.5456514", "0.5449794", "0.5448091", "0.5429253" ]
0.7477268
0
Generate the behavioural policy based on the given parameters and the distribution family of this actor.
def _gen_behaviour(self, params: Tensor) -> Distribution: # TODO: check for parameter size mismatches # TODO: support params being for multiple different distributions if len(params.size()) == 1: params = params.unsqueeze(0) elif len(params.size()) > 2: # FIXME: better error message raise ValueError("unknown dimensionality") if self.settings.dist is Categorical: return Categorical(logits=params) if self.settings.dist is Normal: return Normal(params[:, 0], params[:, 1]) if self.settings.dist is MultivariateNormal: half = params.size()[1] // 2 return MultivariateNormal(params[:, :half], diag_embed(softplus(params[:, half:]))) raise NotImplementedError("actors do not support this action distribution yet")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def act(self, state: State) -> Distribution:\n return self._gen_behaviour(self._gen_policy_params(state))", "def create_greedy_policy(self):\n\n def policy_fn(state):\n return self.actor_baseline.predict([[state]])[0][0]\n\n return policy_fn", "def make_decision_with_policy(self, policy_type, *args):\n if policy_type == 1: # ADP\n assert len(args) == 2, 'args should be exactly 2'\n cur_K = -self.K_im_traj[-1]\n distance_2_tan, radian_at_tan = args\n self.dis_sum += distance_2_tan\n pwm_l_new, pwm_r_new = policy.adp(distance_2_tan, radian_at_tan, self.dis_sum, cur_K)\n elif policy_type == 2: # pure pursuit\n l_d, sin_alpha = args\n amp = 150\n pwm_l_new, pwm_r_new = policy.pure_pursuit(l_d, sin_alpha, amp)\n elif policy_type == 3: # Car following with ADP\n assert len(args) == 3, 'args should be exactly 3'\n cur_K = -self.K_im_traj[-1]\n distance_2_tan, radian_at_tan, estimated_dis = args\n self.dis_sum += distance_2_tan\n if self.is_recording and self.counter % 100 == 0:\n np.save('./.out/record', self.record)\n pwm_l_new, pwm_r_new = policy.car_following_with_adp(distance_2_tan, radian_at_tan, self.dis_sum, cur_K, estimated_dis, self.record)\n print(self.counter)\n self.counter += 1\n elif policy_type == 4:\n K = 0.5\n dis2car, = args\n pwm_l_new, pwm_r_new = policy.car_following(dis2car, K)\n elif policy_type == 5:\n d_arc, d_curve, theta = args\n pwm_l_new, pwm_r_new = policy.adp_coupled_car_following(d_arc, d_curve, theta, self.z, self.K_coupled)\n else:\n pwm_l_new, pwm_r_new = 0, 0\n print('Policy Not Found')\n self.motor.motor_set_new_speed(pwm_l_new, pwm_r_new)", "def get_behaviour_policy(self, state):\n return self.behaviour_policy(state)", "def calculate_policy(self, state):\n # short aliases\n s = state # s stands for state\n g = self.config['gamma'] # g stands for gamma\n n = self.action_space.n # n stands for the number of actions\n a = self.config['alpha']\n pi_s = self.policy[state] # pi_s stands for the policy in state s\n weights = self.weights[state]\n # print(weights)\n\n\n # obtains the probability vector from Hedge: p_i(t) = (1+alpha)^s_i(t) / sum_{j \\in K} (1+alpha)^s_j(t)\n sum_weights_exponentials = sum([(1 + a) ** w for w in weights])\n pre_prob = [(((1 + a) ** w) / sum_weights_exponentials) for w in weights]\n\n # the policy is a probability vector, giving the probability of each action\n pi_s = [((1 - g) * p) + (g / n) for p in pre_prob]\n\n return pi_s", "def _gen_policy_params(self, state: State) -> Tensor:\n ...", "def policy(agent):", "def probability(self, agent1, agent2, **attributes):\n if('age_difference' in attributes and 'mean_age' in attributes):\n age_difference = attributes['age_difference']\n mean_age = attributes['mean_age']\n else:\n agent1_age = self.grid_queues[agent1.grid_queue].age()\n agent2_age = self.grid_queues[agent2.grid_queue].age()\n mean_age = (agent1_age + agent2_age) / 2.0\n age_difference = agent2_age - agent1_age\n \n pad = (1 - (2*agent1.sex))* self.PREFERRED_AGE_DIFFERENCE # correct for perspective\n age_abs = abs(age_difference-(pad*self.PREFERRED_AGE_DIFFERENCE_GROWTH*mean_age))\n age_probability = np.exp(self.AGE_PROBABILITY_MULTIPLIER*age_abs)\n \n sb_abs = abs(agent1.sexual_behavior-agent2.sexual_behavior)\n sb_probability = np.exp(self.SB_PROBABILITY_MULTIPLIER*sb_abs)\n p = (agent1.sex ^ agent2.sex)*age_probability*sb_probability\n #print \"name\",agent1.name,\"age\", round(self.age(agent1),2),\"sex\",agent1.sex,\"sb\",agent1.sexual_behavior,\n #print \"| name\",agent2.name,\"age\", 
round(self.age(agent2),2),\"sex\",agent2.sex,\"sb\",agent2.sexual_behavior,\n #print \"| age_p = \", round(age_probability,2), \"sb_p\", round(sb_probability,2), \"p\", round(p,2)\n return p", "def elbo_with_policy(self, rng, params, x, policy, train, context=None):\n d = np.prod(x.shape[1:])\n batch_size = x.shape[0]\n\n rng_perm, rng_t, rng_drop = jax.random.split(rng, 3)\n\n # Get random sigma ~ Unif(S_n_steps)\n sigmas = ardm_utils.get_batch_permutations(rng_perm, x.shape[0],\n self.num_steps)\n\n # Sample t from policy.\n t, _, weight_policy = self.sample_policy_t(rng_t, batch_size, policy)\n\n prev_selection, _ = ardm_utils.get_selection_for_sigma_and_t(\n sigmas, t, self.config.mask_shape)\n future_selection = (1. - prev_selection)\n\n corrupted = self.corrupt(x, prev_selection)\n\n net_out = self.apply_fn(\n {'params': params}, corrupted, t, prev_selection, train,\n rngs={'dropout': rng_drop} if train else None, context=context)\n\n log_px_sigma_geq_t = self.logprob_fn(x, net_out)\n\n log_px_sigma_geq_t = future_selection.reshape(\n log_px_sigma_geq_t.shape) * log_px_sigma_geq_t\n log_px_sigma_geq_t = util_fns.sum_except_batch(log_px_sigma_geq_t)\n\n ce = log_px_sigma_geq_t / d / np.log(2)\n\n # Reweigh for expectation over i.\n reweighting_factor_expectation_i = 1. / (self.num_steps - t)\n elbo_per_t = reweighting_factor_expectation_i * log_px_sigma_geq_t\n\n # Reweigh for expectation over policy.\n elbo = elbo_per_t * weight_policy\n\n elbo = elbo / d / np.log(2)\n elbo_per_t = elbo_per_t / d / np.log(2)\n\n return elbo, elbo_per_t, ce, t", "def _gen_policy_params(self, state: State) -> Tensor:\n return self.network(state)", "def get_policy_encode(self, state, action, encodes):\n policy = np.zeros(self.action_dim, dtype=np.float32)\n\n G_policy = \\\n self.sess.run(\n self.action_dist_mu,\n {self.state: state, self.encodes: encodes}\n # {state, encodes}\n )\n\n mu = copy.copy(G_policy[0])\n\n sigma = [[np.exp(-3.0), 0.0], [0.0, np.exp(-3.0)]]\n\n policy[0] = multivariate_normal.pdf(action, mu, sigma)\n\n return policy", "def add_policy(self, policy, epochs=None, starting_epoch=0, ending_epoch=1, frequency=1):\n\n if epochs is None:\n epochs = list(range(starting_epoch, ending_epoch, frequency))\n\n for epoch in epochs:\n if epoch not in self.policies:\n self.policies[epoch] = [policy]\n else:\n self.policies[epoch].append(policy)\n assert len(self.policies[epoch]) > 0\n \n self.sched_metadata[policy] = {'starting_epoch': starting_epoch,\n 'ending_epoch': ending_epoch,\n 'frequency': frequency}\n\n class_name = policy.__class__.__name__.split(\"Policy\")[0]\n \n if \"Remover\" in class_name:\n self.thinning = True\n self.thinning_epoch = epochs\n\n # In the following code, we save the maximum and minimum epochs withing all pruners.\n # This is designed for distingushing the \"pretrain\", \"ADMM pruning\" and \"retrain\" phase. 
\n # Toward this end, we are able to tune the initial learning rate in an automative way.\n if class_name in ['ADMM', \"Pruning\"]:\n self.prune_mechanism = True\n if 'max_epoch' in self.pruner_info:\n if ending_epoch > self.pruner_info['max_epoch']:\n self.pruner_info['max_epoch'] = ending_epoch\n else:\n self.pruner_info['max_epoch'] = ending_epoch\n \n if class_name == 'ADMM':\n self.admm_prune = True\n # Can not deal with seperate ADMM pruner.\n self.pruner_info[\"ADMM_epoch\"] = ending_epoch\n\n if 'min_epoch' in self.pruner_info:\n if starting_epoch < self.pruner_info['min_epoch']:\n self.pruner_info['min_epoch'] = starting_epoch\n else:\n self.pruner_info['min_epoch'] = starting_epoch", "def calculate_policy(self, state):\n # short aliases\n s = state # s stands for state\n g = self.config['gamma'] # g stands for gamma\n n = self.action_space.n # n stands for the number of actions\n pi_s = self.policy[state] # pi_s stands for the policy in state s\n\n sum_weights = sum(self.weights[s])\n\n # the policy is a probability vector, giving the probability of each action\n pi_s = [((1 - g) * w / sum_weights) + (g / n) for w in self.weights[s]]\n # print(state, pi_s)\n return pi_s", "def log_prob_with_policy_and_sigma(self, rng, params, x, policy, sigmas,\n train, context=None):\n d = np.prod(x.shape[1:])\n batch_size = x.shape[0]\n\n # Expand the dimensions of sigma if only a single order is given.\n if len(sigmas.shape) == 1:\n sigmas = jnp.repeat(sigmas[None], repeats=batch_size, axis=0)\n assert sigmas.shape == (batch_size, self.num_steps), (\n f'{sigmas.shape} does not match')\n\n rng_t, rng_drop = jax.random.split(rng, 2)\n\n # Sample t from policy.\n left_t, right_t, weight_policy = self.sample_policy_t(\n rng_t, batch_size, policy)\n num_tokens_in_parallel = right_t - left_t\n\n prev_selection, current_selection = ardm_utils.get_selections_for_sigma_and_range(\n sigmas, left_t, right_t, self.config.mask_shape)\n\n corrupted = self.corrupt(x, prev_selection)\n\n net_out = self.apply_fn(\n {'params': params}, corrupted, left_t, prev_selection, train,\n rngs={'dropout': rng_drop} if train else None, context=context)\n\n log_px_sigma_geq_t = self.logprob_fn(x, net_out)\n\n current_selection = current_selection.reshape(log_px_sigma_geq_t.shape)\n log_px_sigma_geq_t = current_selection * log_px_sigma_geq_t\n log_px_sigma_geq_t = util_fns.sum_except_batch(log_px_sigma_geq_t)\n\n # Reweigh for expectation over policy.\n log_prob = log_px_sigma_geq_t / num_tokens_in_parallel * weight_policy\n log_prob = log_prob / d / np.log(2)\n\n return log_prob", "def create_policy(self, fn_inputs):\n\n # determine if the policy is already in place\n response, err_msg = self._get_policy_by_sha256(fn_inputs.get('reaqta_sha256'))\n if err_msg:\n return {}, err_msg\n\n policy_info = response.json()\n if policy_info.get('result'):\n return {}, 'A policy already exists for this file hash: {0}. 
<a href=\"{1}\" target=\"blank\">{1}</a>'.format(\n fn_inputs.get('reaqta_sha256'),\n self.make_linkback_url(policy_info['result'][0]['id'], POLICY_DETAILS))\n\n params = {\n \"sha256\": fn_inputs.get('reaqta_sha256'),\n \"title\": fn_inputs.get('reaqta_policy_title', ''),\n \"description\": fn_inputs.get('reaqta_policy_description', ''),\n \"disable\": not fn_inputs.get('reaqta_policy_enabled', True),\n \"block\": fn_inputs.get('reaqta_policy_block', False),\n \"enabledGroups\": [],\n \"disabledGroups\": []\n }\n\n # collect all the group names and find the groupIds\n if fn_inputs.get('reaqta_policy_included_groups'):\n group_name_list = [ group.strip() for group in fn_inputs.get('reaqta_policy_included_groups', \"\").split(',') ]\n group_id_list = self.get_group_ids(group_name_list)\n if group_id_list:\n params['enabledGroups'] = group_id_list\n\n if fn_inputs.get('reaqta_policy_excluded_groups'):\n group_name_list = [ group.strip() for group in fn_inputs.get('reaqta_policy_excluded_groups', \"\").split(',') ]\n group_id_list = self.get_group_ids(group_name_list)\n if group_id_list:\n params['disabledGroups'] = group_id_list\n\n LOG.debug(\"create_policy: %s\", params)\n url = urljoin(POLICY_URI, \"trigger-on-process-hash\")\n return self.api_call(\"POST\", url, params)", "def bellman_operator(self, P, b, policy=\"behavior\"):\n Q = np.matrix(self.mdp.Q)\n R = np.matrix(self.mdp.R)\n A = np.matrix(self.mdp.A)\n B = np.matrix(self.mdp.B)\n Sigma = np.matrix(np.diag(self.mdp.Sigma))\n\n if policy == \"behavior\":\n theta = np.matrix(self.behavior_policy.theta)\n noise = self.behavior_policy.noise\n if hasattr(self, \"S\"):\n S = self.S\n else:\n S = A + B * theta\n self.S = S\n if hasattr(self, \"C\"):\n C = self.C\n else:\n C = Q + theta.T * R * theta\n self.C = C\n elif policy == \"target\":\n theta = np.matrix(self.target_policy.theta)\n noise = self.target_policy.noise\n if hasattr(self, \"S_target\"):\n S = self.S_target\n else:\n S = A + B * theta\n self.S_target = S\n if hasattr(self, \"C_target\"):\n C = self.C_target\n else:\n C = Q + theta.T * R * theta\n self.C_target = C\n else:\n theta = np.matrix(policy)\n noise = policy.noise\n S = A + B * theta\n C = Q + theta.T * R * theta\n\n Pn = C + self.gamma * (S.T * np.matrix(P) * S)\n bn = self.gamma * (b + np.trace(np.matrix(P) * np.matrix(Sigma))) \\\n + np.trace((R + self.gamma * B.T *\n np.matrix(P) * B) * np.matrix(np.diag(noise)))\n return Pn, bn", "def p(self) -> Probability:\n ...", "def __call__(self, hyperparameters: dict) -> dict:\n result = hyperparameters.copy()\n\n for key, value in self.mutations.items():\n result[key] = value() if callable(value) else random.choice(value)\n\n return result", "def set_strategy(self, policy):\n self.suspicious = policy\n # The rationale is that if a gun policy is active the agent chose more often to use force as a initial strategy\n if self.suspicious:\n self.s_aggressor = random.choices(['Force', 'nForce'], [.7, .4])\n else:\n self.s_aggressor = random.choices(['Force', 'nForce'], [.14, .86])", "def make_random_policy(action_count):\n actions = np.ones(action_count, dtype=float) / action_count\n\n def policy_func(observation):\n return actions\n\n return policy_func", "def get_policies_default(config, n_policies, obs_space, act_space, policy_template=\"player_%d\"):\r\n policies = {policy_template % i: get_agent_config(agent_id=i, which=config['_policies'][i],\r\n config=config,\r\n obs_space=obs_space, act_space=act_space)\r\n for i in range(1, 1 + n_policies)}\r\n return policies", 
"def main(args: argparse.Namespace, dag: CausalDAG=None, policy=None):\n \n # initialize the environment: create a graph and generate observational \n # samples from the joint distribution of the graph variables\n env = CausalEnv(num_vars=args.num_variables, \n min_categs=args.min_categories,\n max_categs=args.max_categories,\n graph_structure=args.graph_structure,\n edge_prob=args.edge_prob,\n dag=dag)\n \n obs_data = env.reset(n_samples=args.n_obs_samples)\n obs_dataloader = DataLoader(obs_data, batch_size=args.obs_batch_size, shuffle=True, drop_last=True)\n \n device = 'cuda:0' if torch.cuda.is_available() else 'cpu' \n \n # initialize policy learning\n if args.learn_policy:\n policy = MLP(args.num_variables, [512, 256, 128]).float()\n policy = policy.to(device)\n policy_optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4)\n rewards_lst = []\n \n for t in range(args.max_episodes):\n policy_optimizer.zero_grad()\n log_probs, reward = train(args, env, obs_dataloader, device, policy)\n \n reward += [0] * (args.epochs - len(reward))\n rewards_lst.append(reward)\n baseline = args.beta_baseline * torch.Tensor(reward) + (1 - args.beta_baseline) * baseline if t != 0 else torch.Tensor(reward)\n\n policy_loss = -torch.sum((torch.Tensor(reward[:len(log_probs)]) - baseline[:len(log_probs)]) * torch.cumsum(torch.tensor(log_probs, requires_grad=True), dim=0))\n \n policy_loss.backward()\n policy_optimizer.step()\n \n print(torch.sum(torch.Tensor(reward)))\n print(torch.mean(torch.sum(torch.tensor(rewards_lst), dim=-1)))\n \n if torch.sum(torch.Tensor(reward)) >= max(torch.sum(torch.tensor(rewards_lst), dim=-1)):\n print('\\nSaving policy...')\n torch.save(policy.state_dict(), 'policy_mlp.pth')\n \n else:\n train(args, env, obs_dataloader, device, policy)", "def create_policy(env, policy_type, policy_weights_file=None):\n input_size = env.observation_space.shape[0]\n output_size = env.action_space.shape[0]\n action_low = env.action_space.low\n action_high = env.action_space.high\n policy = policy_type(input_size=input_size,\n output_size=output_size,\n action_high=action_high,\n action_low=action_low)\n if policy_weights_file:\n policy.load_model(policy_weights_file)\n return policy", "def getPolicy(self):\n # NOTE Naming scheme of POMDP policy files:\n # dai_<worker skill rounded to 0.1>_<reward for incorrect answer>.policy\n # i.e. 
'dai_1.0_-100' is the policy for the Dai-AIJ13 pomdp with\n # average worker skill = 1.0 and reward for incorrect answer of -100\n filename = 'dai_%.1f_%s.policy' % (self.average_gamma, self.reward_incorrect)\n if os.path.isdir(self.policy_dir) and os.path.isfile(self.policy_dir + '/' + filename):\n #previously solved\n policy = pomdp_policy.POMDPPolicy(self.policy_dir + '/' + filename, file_format='zmdp', n_states= self.num_states)\n else:\n #have to solve it\n zpomdp = pomdp.ZPOMDP()\n policy = zpomdp.solve(self.pomdp_var.states, self.pomdp_var.actions, self.pomdp_var.observations,\n self.pomdp_var.CLOSURE_f_start(self.num_states),\n self.pomdp_var.f_transition,\n self.pomdp_var.CLOSURE_f_observation(self.average_gamma),\n self.pomdp_var.CLOSURE_f_reward(self.reward_create, self.reward_correct, self.reward_incorrect),\n discount=self.discount, timeout=self.timeout, directory=self.policy_dir)\n\n #save our new policy where we can find it again\n #rename p.policy to filename w/params\n shutil.move(self.policy_dir + '/p.policy', self.policy_dir + '/' + filename)\n return policy", "def policy_improvement(P, nS, nA, value_from_policy, policy, gamma=0.9):\n\n\tnew_policy = np.zeros(nS, dtype='int')\n\n\t############################\n\t# YOUR IMPLEMENTATION HERE #\n\tfor s in range(nS):\n\t\tq_values = np.zeros(nA)\n\t\tfor action in range(nA):\n\t\t\tcurrent_q_value = 0\n\t\t\tfor transition in P[s][action]:\t# for every possible transition\n\t\t\t\t# print(len(P[s][action]))\n\t\t\t\tprobability = transition[0]\n\t\t\t\treward = transition[2]\n\t\t\t\tnext_state = transition[1]\n\t\t\t\tvalue_next_state = value_from_policy[next_state]\n\n\t\t\t\tcurrent_q_value += probability * (reward + gamma * value_next_state)\n\n\t\t\tq_values[action] = current_q_value\n\n\t\tnew_policy[s] = np.argmax(q_values)\n\n\n\t# print(new_policy)\n\t############################\n\treturn new_policy", "def policy_gamble (self):\n\t\tidx = self.idx \t\t\t\t# internal time index of state\n\t\tprobs = self.probs\t\t\t# prob of reward for an action\n\t\tbeta = self.beta\t\t\t# inverse temp \n\n\t\t# softmax\n\t\tAct = beta*self.Q[idx]\n\t\tp = 1./(1. 
+ np.exp(-Act))\t# probability of gamble\n\t\tself.SM[idx] = p\n\n\t\t# decide whether to take gamble based on p\n\t\trnd = np.random.random_sample()\n\t\tif rnd < p:\n\t\t\tC = 1\t# gamble\n\t\telse:\n\t\t\tC = 0\t# no gamble\n\t\tself.C[idx] = C\n\n\t\t# no gamble\n\t\tif C == 0:\t\n\t\t\treward = 0\t\t # gamble reward encoded relative to reward\n\t\t\tself.R[idx] = -1 # rewarded sure thing, coded as -1\n\t\t\tself.PE[idx] = 0 # no PE, get the thing you expected\n\t\t# gamble\n\t\telse:\n\t\t\t# decide whether a reward is delivered\n\t\t\treward = np.random.binomial(size=1, n=1, p=probs)[0]\n\t\t\tself.R[idx] = reward # indicator that reward was received\n\t\t\tif reward == 0:\n\t\t\t\treward = self.l_mag\n\t\t\telse:\n\t\t\t\treward = self.r_mag\n\t\t\tself.PE[idx] = reward - self.Q[idx]", "def arms_probabilities(self):\n self.arms_probability = dict.fromkeys(self.active_arms)\n for i in self.arms_probability:\n self.arms_probability[i] = random.betavariate(self.arms_dict_params[i]['current_alpha'],\n self.arms_dict_params[i]['current_beta'])\n self.arms_dict_params[i]['probability'].append(self.arms_probability[i])\n\n return self.arms_dict_params, self.arms_probability", "def initializeDistribution(self):\n self.convertToDistrDict['Jacobi'] = self.convertJacobiToBeta\n self.convertToQuadDict ['Jacobi'] = self.convertBetaToJacobi\n self.measureNormDict ['Jacobi'] = self.stdProbabilityNorm\n #this \"if\" section can only be called if distribution not generated using readMoreXML\n if (not self.upperBoundUsed) and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicBetaDistribution(self.alpha,self.beta,self.high-self.low,self.low)\n else:\n if self.lowerBoundUsed == False:\n a = 0.0\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicBetaDistribution(self.alpha,self.beta,self.high-self.low,a,b,self.low)\n self.preferredPolynomials = 'Jacobi'\n self.compatibleQuadrature.append('Jacobi')\n self.compatibleQuadrature.append('ClenshawCurtis')", "def policy (self,forced_actions=None,forced_rewards=None,state_idx=None):\n\t\tif self.gamble:\n\t\t\tself.policy_gamble()\n\t\t\treturn\n\t\tif self.UCB:\n\t\t\tself.policy_UCB(forced_actions,forced_rewards,state_idx)\n\t\t\treturn\n\n\t\tidx = self.idx \t\t\t\t# internal time index of state\n\t\tprobs = self.probs\t\t\t# prob of reward for an action\n\t\tbeta = self.beta\t\t\t# inverse temp \n\n\t\t# calc Act thalamus activation\n\t\tAct = beta*self.Q[idx,:]\n\n\t\t# multioption softmax (invariant to constant offsets)\n\t\tnewAct = Act - np.max(Act)\n\t\texpAct = np.exp(newAct)\n\t\tps = expAct/np.sum(expAct)\n\t\tself.SM[idx,:] = ps\n\t\tcs_ps = np.cumsum(ps)\n\n\t\t# select action\n\t\tif forced_actions is None:\n\t\t\tsample = np.random.random_sample()\n\t\t\tselected = False\n\t\t\tcheck = 0\n\t\t\twhile not selected:\n\t\t\t\tif sample < cs_ps[check]:\n\t\t\t\t\tC = check\n\t\t\t\t\tselected = True\n\t\t\t\telse:\n\t\t\t\t\tcheck = check + 1\n\t\telse:\n\t\t\tC = forced_actions[state_idx,idx]\n\t\tself.C[idx] = C\n\t\t\t\n\t\t# decide whether a reward is delivered\n\t\tif forced_rewards is None:\n\t\t\treward = np.random.binomial(size=1, n=1, p= probs[C])[0]\n\t\telse:\n\t\t\treward = forced_rewards[state_idx,idx]\n\t\tself.R[idx] = reward # indicator that reward was received\n\t\tif reward == 0:\n\t\t\treward = self.l_mag\n\t\telse:\n\t\t\treward = self.r_mag\n\n\t\tPE = reward - self.Q[idx,C]\n\t\tself.PE[idx] = PE", 
"def probability(structure,seq, react=None):\n return energy_to_proba(get_ens_energy(seq,react),get_stru_energy(structure,seq,react))" ]
[ "0.69731206", "0.65240955", "0.60581094", "0.5933542", "0.58577895", "0.58535635", "0.58448905", "0.5797706", "0.5698496", "0.5630963", "0.5575867", "0.55735415", "0.5571977", "0.5564916", "0.553127", "0.5499231", "0.5448355", "0.5441939", "0.54395163", "0.5424341", "0.53919214", "0.5391298", "0.539106", "0.53472215", "0.5343767", "0.53367686", "0.53160614", "0.52705", "0.52587", "0.52392805" ]
0.6820135
1
Generate the PCL document printing the same line with various Horizontal Motion Index values (HMI).
def print_hmi_demo( printer_socket ): print( 'Horizontal Motion Index demo' ) print( '----------------------------' ) print( 'Printer IP: %s\nPrinter Port: %i' % printer_socket ) medium = PrinterSocketAdapter( printer_socket ) # Very simple printout + usual initialization commands d = HpPclDocument( 'cp850', medium) d.reset_printer() # PCL to reset the printer d.symbol_set() # Set the default symbol set (PC-850) d.paper_source() # Set the default paper source to Tray + eject current page if any. d.pitch( 10 ) # Set 10 cpi size characters vmi_start = 6 # Value corresponding to pitch = 10 cpi. # How to calculate the default/starting VMI value # (see technical reference on Vertical Motion Index) # # Common setting: # vmi = 7.27 -> allow to print 66 lines on a portrait page # (with a half inch margin on the top and bottom) # (10 inch-height / 66 lines-per-page ) x 48 = 7.27 # # vmi = 5.45 -> allow to print 66 lines on a landscape letter page # (with a half inch margin on the top and bottom) # # (7.5 inch-heigh / 66 lines-per-page ) x 48 = 5.45 # # vmi = 6 -> allow to print up to 84 lignes on a A4 page! # should be verified! # 29.5cm - 1.1cm margin top - 1.7cm margin bottom = 26.7cm # 26.7cm / 2.54 cm-per-inch = 10.51 inch # # vmi = 11.51 inch / 84 lines-per-page * 48 = 6.57 # # vmi = 8 -> allow to print up to 63 lines on a A4 page! # should be verified # # 29.5cm - 1.1cm margin top - 1.7cm margin bottom = 26.7cm # 26.7cm / 2.54 cm-per-inch = 10.51 inch # # vmi = 11.51 inch / 63 lines-per-page * 48 = 8.007 # d.vertical_motion_index( 0 ) # Disable d.writeln( u'This is a demo about the usage of VMI' ) d.writeln( u'and how it can impact your print out' ) d.writeln() d.vertical_motion_index( 6 ) # Set the value to 8 lines/inch # 84 lines-per-page d.writeln( u'But this better to have a proper value' ) d.writeln( u'when printing stuff' ) # Increasing the vertical motion index to put more lines in the # same space. for vmi_value in range( vmi_start, vmi_start + 10 ): d.vertical_motion_index( vmi_value ) d.writeln( u'0123456789abcdef' ) medium.open() # Open the media for transmission try: d.send() # Send the content of the current document finally: medium.close() del( d ) del( medium )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def writePOVRAYHeader(self, fh):\n settings = self.mainWindow.preferences.povrayForm\n\n focalPoint = self.camera.GetFocalPoint()\n campos = self.camera.GetPosition()\n viewup = self.camera.GetViewUp()\n angle = settings.viewAngle\n if settings.shadowless:\n shadowless = \"shadowless \"\n else:\n shadowless = \"\"\n\n if self.parent.blackBackground:\n rval = gval = bval = 0\n else:\n rval = gval = bval = 1\n\n fh.write(\"camera { perspective location <%f,%f,%f>\\n\" % (- campos[0], campos[1], campos[2]))\n fh.write(\" look_at <%f,%f,%f>\\n\" % (- focalPoint[0], focalPoint[1], focalPoint[2]))\n fh.write(\" angle %f\\n\" % angle)\n fh.write(\" sky <%f,%f,%f> }\\n\" % (- viewup[0], viewup[1], viewup[2]))\n fh.write(\"light_source { <%f,%f,%f> color rgb <1,1,1> %s }\\n\" % (- campos[0], campos[1], campos[2], shadowless))\n fh.write(\"background { color rgb <%f,%f,%f> }\\n\" % (rval, gval, bval))", "def generate_template_trophy(h1, h2, w, drawing):\n drawing.add(dxf.line((0, 0), (600,0), color=255, layer='LINES', thickness=0.00))\n drawing.add(dxf.line((600, 0), (600,450), color=255, layer='LINES', thickness=0.00))\n drawing.add(dxf.line((600,450), (0,450), color=255, layer='LINES', thickness=0.00))\n drawing.add(dxf.line((0,450), (0,0), color=255, layer='LINES', thickness=0.00))\n refpoint = generate_ref_trophy(h1, h2, w)\n for i in refpoint[:4]:\n x,y = i\n draw(x,y,x+h1,y,drawing)\n draw(x,y,x,y-w,drawing)\n draw(x,y-w,x+h2,y-w,drawing)\n draw(x+h1,y,x+h2,y-w,drawing)\n for i in refpoint[4:8]:\n x,y=i\n draw(x,y,x-h1,y,drawing)\n draw(x,y,x,y+w,drawing)\n draw(x,y+w,x-h2,y+w,drawing)\n draw(x-h2,y+w,x-h1,y,drawing)\n x,y = refpoint[-2]\n draw(x,y,x,y+h1,drawing)\n draw(x,y,x+w,y,drawing)\n draw(x+w,y,x+w,y+h2,drawing)\n draw(x+w,y+h2,x,y+h1,drawing)\n x,y = refpoint[-1]\n draw(x,y,x,y-h1,drawing)\n draw(x,y,x-w,y,drawing)\n draw(x-w,y,x-w,y-h2,drawing)\n draw(x-w,y-h2,x,y-h1,drawing)", "def header(self):\n print 'dimensions',self.data.shape\n print 'llcorner', self.xllcorner, self.yllcorner\n print 'cell size', self.cellsize", "def writeHeading(fil, nodes, elems, text=''): #currently only for hexahedral mesh\n fil.write(\" CONTROL INFO 2.2.30\\n\")\n fil.write(\"** GAMBIT NEUTRAL FILE\\n\")\n fil.write('%s\\n' %text)\n fil.write('PROGRAM: Gambit VERSION: 2.2.30\\n')\n fil.write(strftime('%d %b %Y %H:%M:%S\\n', gmtime()))\n fil.write(' NUMNP NELEM NGRPS NBSETS NDFCD NDFVL\\n')\n fil.write('%10i%10i%10i%10i%10i%10i\\n' % (shape(nodes)[0],shape(elems)[0],1,0,3,3))\n fil.write('ENDOFSECTION\\n')", "def print_headings(self):\n hdg_list = sorted(self.data.keys())\n sys.stdout.write('Offset: %.1f; ' % self.offset)\n sys.stdout.write('Magnetic Declination: %.2f\\n' % np.rad2deg(self.mag_var))\n # get maximum length of row headers for lining up everything\n max_len = max(map(lambda x: len(x[0]), PRINT_ROW_INFO))\n while hdg_list:\n # this part ensures printing only 6 columns at a time to prevent\n # text from wrapping when printed to a terminal\n if len(hdg_list) > 6:\n last = 6\n else:\n last = len(hdg_list)\n hdgs = hdg_list[0:last]\n # pop the headings used in HDGS out of HDG_LIST\n hdg_list[0:last] = []\n\n # Printing handled\n for row_header, fmt, dat_key in PRINT_ROW_INFO:\n # print row header\n lead_space = ' ' * (max_len - len(row_header))\n sys.stdout.write(lead_space + row_header)\n # print row data\n #pdb.set_trace()\n for hdg in hdgs:\n sys.stdout.write(' '+fmt % self.data[hdg][dat_key])\n sys.stdout.write('\\n')\n # print sample data gathered\n lead_space = ' ' * (max_len - 5)\n 
sys.stdout.write(lead_space + 'Data:')\n for ii in range(self.n_samples):\n if ii > 0:\n sys.stdout.write(' ' * max_len)\n for hdg in hdgs:\n comp_dat = self.data[hdg]['compass_sample_rad'][ii]\n sys.stdout.write(' %6.2f' % comp_dat)\n sys.stdout.write('\\n')\n sys.stdout.write('\\n') # add a line between sections", "def _render_horizontal(self, gc, lx, ly, rx, ry, mx, my):\n\n with gc:\n gc.set_line_width(20)\n gc.set_stroke_color(self._get_border_color())\n tee_h(gc, lx, ly, mx, my, ry)\n\n gc.set_line_width(10)\n self.set_fill_color(gc)\n tee_h(gc, lx, ly, mx, my, ry)", "def make_four_pdf(args):\n params = make_four_params(args)\n m4_filename = params['m4_filename']\n prefix = params['prefix']\n min_matching_length = params['min_matching_length']\n output_prefix = params['output_prefix']\n\n # if there are fewer than threshold reads then skip it\n threshold = 25 # threshold before plotting.\n if len(open(m4_filename).readlines()) < threshold:\n print('skipping %s because it has %d lines' % (\n m4_filename,\n len(open(m4_filename).readlines()))\n )\n return\n\n plb.rcParams['figure.figsize'] = 30, 30\n plt.clf()\n plt.figure(1)\n\n remove_punctuation = lambda x: ''.join(e for e in x if e.isdigit() or e == '.')\n coords = [int(remove_punctuation(a)) for a in prefix.split('_')[1:3]]\n dist = coords[1] - coords[0]\n\n graph = generate_graph(params)\n preset, postset, spanset, gapset = get_read_classifications(params)\n # Draw Ground Truth\n plt.subplot(2, 3, 1)\n node_colors = node_set_colors(graph.nodes(), spanset, gapset, preset, postset)\n pos = nx.spring_layout(graph)\n\n assert(len(node_colors) == nx.number_of_nodes(graph))\n title = \"Chr {0}; L={1}; Ground Truth Colors\\n\\\n Red=Preset, Yellow=Postset, Blue=GapSet, Green=SpanSet\\n\\\n num_edges = {2}\\\n \"\\\n .format(prefix, min_matching_length, nx.number_of_edges(graph))\n nx.draw_spring(graph, node_color=node_colors, node_size=100)\n #nx.draw(graph, node_color=node_colors, node_size=100, pos=pos)\n plt.title(title)\n\n # Draw histogram of smith waterman scores and remove bad edges\n\n # squash preset and postset nodes\n graph = nx_helpers.remove_nodes(graph, preset)\n graph = nx_helpers.remove_nodes(graph, postset)\n\n # filter nodes by smith_waterman\n with utils.Timer(\"smith_waterman_filter\"):\n flanking_reads = preset.union(postset)\n # subplots 2 and 3 occur in smith_waterman_filter\n graph = smith_waterman_filter(graph, flanking_reads, params)\n\n # Draw groudn truth with squashed nodes\n plt.subplot(2, 3, 4)\n node_colors = node_set_colors(graph.nodes(), spanset, gapset, preset, postset)\n assert(len(node_colors) == nx.number_of_nodes(graph))\n title = \"Chr {0}; L={1}; Ground Truth Colors \\n\\\n Removed Preset and Postsetnodes; Blue=GapSet, Green=SpanSet\\n\\\n number of edges = {2}\"\\\n .format(prefix, min_matching_length, nx.number_of_edges(graph))\n nx.draw_spring(graph, node_color=node_colors, node_size=100)\n #nx.draw(graph, node_color=node_colors, node_size=100, pos=pos)\n plt.title(title)\n\n # Drop Small Communities and Draw\n plt.subplot(2, 3, 5)\n communities = nx_helpers.get_communities(graph)\n graph, communities = drop_small_communities(graph, communities)\n node_colors = node_community_colors(graph, communities)\n assert(len(node_colors) == nx.number_of_nodes(graph))\n title = \"Chr {0}; L={1}; After Removing Small Communities; NumCom={2}\\n\\\n ComQual={3}, MapQual={4}\\n\\\n number of edges = {5}\"\\\n .format(prefix, min_matching_length, len(communities),\n community_quality(communities, spanset, 
gapset),\n mapping_quality(graph, spanset, gapset),\n nx.number_of_edges(graph))\n #nx.draw(graph, node_color=node_colors, node_size=100, pos=pos)\n nx.draw_spring(graph, node_color=node_colors, node_size=100)\n plt.title(title)\n\n # IGV Line Plot\n plt.subplot(2, 3, 6)\n make_line_plot((spanset, gapset, preset, postset), params)\n\n plt.savefig(output_prefix + '_figs/%s-communities.pdf' % (prefix))\n\n ret_string = '%s\\t%s\\t%d\\t%d\\t%d\\t%d\\t%d\\t%d\\tchr%s_slop5000.png\\t%s-communities.pdf' % (\n prefix,\n prefix.split('_')[0],\n coords[0],coords[1],coords[1]-coords[0],\n len(communities),\n community_quality(communities, spanset, gapset),\n mapping_quality(graph, spanset, gapset),\n prefix,prefix\n )\n\n return ret_string", "def process(self):\n lines = cv.HoughLinesP(self.input_image, 1, np.pi/180, 100, 10, 150, 400)\n for x1,y1,x2,y2 in lines[0]:\n cv.line(self.output_image,(x1,y1),(x2,y2),(0,255,0),2)\n cv.line(self.output_image, (0,0), (100, 100), (0, 0, 255), 4)\n return self.output_image", "def generate(self, diagram):", "def drawPageHeader(pre=\"\",post=\"\",corner=3,direction=0):\n dislin.paghdr(pre,post,corner,direction)", "def template1(self):\n self.indirectobject(1, 0, \"<<\\n /Type /Catalog\\n /Outlines 2 0 R\\n /Pages 3 0 R\\n>>\")\n self.indirectobject(2, 0, \"<<\\n /Type /Outlines\\n /Count 0\\n>>\")\n self.indirectobject(3, 0, \"<<\\n /Type /Pages\\n /Kids [4 0 R]\\n /Count 1\\n>>\")\n self.indirectobject(4, 0, \"<<\\n /Type /Page\\n /Parent 3 0 R\\n /MediaBox [0 0 612 792]\\n /Contents 5 0 R\\n /Resources <<\\n /ProcSet [/PDF /Text]\\n /Font << /F1 6 0 R >>\\n >>\\n>>\")\n self.indirectobject(6, 0, \"<<\\n /Type /Font\\n /Subtype /Type1\\n /Name /F1\\n /BaseFont /Helvetica\\n /Encoding /MacRomanEncoding\\n>>\")", "def writeVelocityPlot(self):\n name = \"velocity.vtk\"\n chargeFile = open(name,'w')\n chargeFile.write(\"%s\\n\"%(\"# vtk DataFile Version 2.0\"))\n chargeFile.write(\"%s\\n\"%(\"obtained via hydraulicmodule\"))\n chargeFile.write(\"%s\\n\"%(\"ASCII\"))\n chargeFile.write(\"%s\\n\"%(\"DATASET UNSTRUCTURED_GRID\"))\n chargeFile.write(\"%s %i %s\\n\"%(\"POINTS\",len(self.points),\"double\"))\n dim = self.mesh.getSpaceDimensions()\n if (dim==2): \n for ind in range(0,len(self.points)):\n chargeFile.write(\"%15.8e %15.8e %15.8e\\n\"%(self.points[ind][0],\\\n self.points[ind][1],\\\n 0.))\n pass\n pass\n elif (dim==3): \n for ind in range(0,len(self.points)):\n chargeFile.write(\"%15.8e %15.8e %15.8e\\n\"%(self.points[ind][0],\\\n self.points[ind][1],\\\n self.points[ind][2]))\n pass\n pass\n else:\n raise Exception(\" error in mesh dimension \") \n numberOfCells = self.mesh.getNumberOfCells()\n connectivity = self.mesh.getConnectivity()\n\n cellListSize = 0\n for i in range(0,numberOfCells): # gmsh meshes: type of elements\n gmshType = connectivity[i][1]\n if gmshType == 1: # 2-node line\n cellListSize += 3\n pass\n elif gmshType == 2: # 3-node triangles\n cellListSize += 4\n pass\n elif gmshType == 3: # 4-node quadrangles\n cellListSize += 5\n pass\n elif gmshType == 4: # 4-node tetrahedron\n cellListSize += 5\n pass\n elif gmshType == 5: # 8-node hexahedrons\n cellListSize += 9\n pass\n pass\n chargeFile.write(\"CELLS %i %i\\n\"%(numberOfCells,cellListSize))\n ind = 0\n for cell in connectivity:\n ind = cell[2]+3\n# print \" ctm dbg cell \",vtkTyp,ind,cell,\" perm \",permutation[ind],permutation[ind+1],permutation[ind+2],permutation[ind+3]\n # \n vtkTyp = _vtkGmsh(cell[1])\n if (vtkTyp==3): # 2-node line\n ind = cell[2]+3\n chargeFile.write(\"%i %i 
%i\\n\"%(\n 2,\\\n cell[ind]-1,\\\n cell[ind+1]-1)\n )\n pass\n \n elif (vtkTyp==5): # triangles\n chargeFile.write(\"%i %i %i %i\\n\"%(\n 3, \n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1)\n )\n pass\n elif (vtkTyp==9): # quadr\n chargeFile.write(\"%i %i %i %i %i\\n\"%(\n 4,\\\n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1,\\\n cell[ind+3]-1)\n )\n pass\n elif (vtkTyp==10): # tetra\n chargeFile.write(\"%i %i %i %i %i\\n\"%(\n 4,\\\n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1,\\\n cell[ind+3]-1)\n )\n pass\n elif (vtkTyp==12): # hexahedron\n chargeFile.write(\"%i %i %i %i %i %i %i %i %i\\n\"%(\n 8,\\\n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1,\\\n cell[ind+3]-1,\\\n cell[ind+4]-1,\\\n cell[ind+5]-1,\\\n cell[ind+6]-1,\\\n cell[ind+7]-1)\n )\n pass\n pass\n chargeFile.write(\"%s %i\\n\"%(\"CELL_TYPES\",numberOfCells))\n#\n for i in range(0,numberOfCells):\n gmshType = connectivity[i][1]\n\n if (gmshType)==1:\n cellTyp = 3\n pass\n elif (gmshType)==2:\n cellTyp = 5\n pass\n elif (gmshType)==3:\n cellTyp = 9\n pass\n elif (gmshType)==4:\n cellTyp = 10\n pass\n elif (gmshType)==5:\n cellTyp = 12\n pass\n elif (gmshType)==6:\n cellTyp = 13\n pass\n elif gmshType == 7:\n cellTyp = 14\n pass\n else:\n raise Exception(\" check gmshtype \")\n chargeFile.write(\"%i\\n\"%(cellTyp))\n chargeFile.write(\"%s %d\\n\"%(\"POINT_DATA\",len(self.points)))\n chargeFile.write(\"%s\\n\"%(\"VECTORS vectors float\"))\n for velocityComponent in self.velocity:\n chargeFile.write(\" %e %e %e\\n \"%(velocityComponent[0], velocityComponent[1], velocityComponent[2]))\n chargeFile.write(\"%s\\n\"%(\"SCALARS charge double\"))\n chargeFile.write(\"%s\\n\"%(\"LOOKUP_TABLE default\"))\n#\n \n chargeDataFile=open(\"./\" + self.flowComponent.meshDirectoryName + \"/\" + \"HeVel.dat\",'r')\n line = chargeDataFile.readline()\n while \"Number Of Nodes\" not in line:\n line = chargeDataFile.readline()\n#line.split()\n nodesNumber = line.split()[-1]\n while \"Perm\" not in line:\n line = chargeDataFile.readline()\n#\n# We read the permutation\n#\n for i in range(int(nodesNumber)): chargeDataFile.readline()\n#\n# We read the charge\n#\n for i in range(int(nodesNumber)): chargeFile.write(\" %15.10e\\n \"%(float(chargeDataFile.readline())))", "def CreateBiPennate2():\r\n \r\n print('Opening Data...')\r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n Vectors1 = LongaxisOrtho(Vectors1)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/Project_Gastro/workflows/Cesim/musc_mod_v2/OutputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n print('Loading Finished \\n Rotating Vectors...')\r\n \r\n # Rotate Vectors\r\n RotVectors1 = np.zeros((np.shape(Vectors1)[0],3))\r\n\r\n idxpos = np.argwhere(Centroids1[:,1] >= 0)\r\n idxpos = idxpos.flatten()\r\n idxneg = np.argwhere(Centroids1[:,1] < 0)\r\n idxneg = idxneg.flatten()\r\n\r\n PosVectors = RotationTransform(Vectors1[idxpos,:],degZ = -30)\r\n NegVectors = RotationTransform(Vectors1[idxneg,:],degZ = 30)\r\n RotVectors1[idxpos,:] = PosVectors[:,:]\r\n RotVectors1[idxneg,:] = NegVectors[:,:]\r\n print('Vectors Rotated \\n Inserting Plane...')\r\n \r\n # Create Plane of vectors 
through centreline.\r\n PlaneCentroids,PlaneVectors = InsertPlane(Centroids1,RotVectors1,50,4)\r\n print('Plane Inserted \\n Interpolating Centroids...')\r\n \r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(PlaneCentroids,PlaneVectors,Centroids2)\r\n # Make the data more sparse to display better.\r\n C1,V1 = SparseData(PlaneCentroids,PlaneVectors,0.1)\r\n C2,V2 = SparseData(Centroids2,Vectors2,0.1)\r\n print('Interpolation Finished \\n Plotting...')\r\n \r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(211,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,1,1)\r\n\r\n ax2 = fig.add_subplot(212,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,1,1)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"New Centroid Vectors\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/BiPennateCentralPlaneFibres.dat\",Vectors2,header = header,comments='')", "def magnetization(h):\n if h.has_eh: raise\n if h.has_spin: \n mx = extract.mx(h.intra)\n my = extract.my(h.intra)\n mz = extract.mz(h.intra)\n else: raise\n np.savetxt(\"MAGNETIZATION_X.OUT\",np.matrix([h.geometry.x,h.geometry.y,mx]).T)\n np.savetxt(\"MAGNETIZATION_Y.OUT\",np.matrix([h.geometry.x,h.geometry.y,my]).T)\n np.savetxt(\"MAGNETIZATION_Z.OUT\",np.matrix([h.geometry.x,h.geometry.y,mz]).T)", "def generateNHDRHeader(self, inputFile):\r\n\r\n logging.info('Processing started')\r\n #initialize PCR object\r\n imagePCRFile = PCRDataObject()\r\n #import image parameters of PCR object\r\n imagePCRFile.ImportFromFile(inputFile)\r\n\r\n filePathName, fileExtension = os.path.splitext(inputFile)\r\n #The directory of the .nhdr file\r\n nhdrPathName = filePathName + \".nhdr\"\r\n\r\n if fileExtension == \".pcr\":\r\n if imagePCRFile.form == 1 or imagePCRFile.form == 5 or imagePCRFile.form == 10:\r\n with open(nhdrPathName, \"w\") as headerFile:\r\n headerFile.write(\"NRRD0004\\n\")\r\n headerFile.write(\"# Complete NRRD file format specification at:\\n\")\r\n headerFile.write(\"# http://teem.sourceforge.net/nrrd/format.html\\n\")\r\n if imagePCRFile.form == 5:\r\n headerFile.write(\"type: ushort\\n\")\r\n elif imagePCRFile.form == 10:\r\n headerFile.write(\"type: float\\n\")\r\n elif imagePCRFile.form == 1:\r\n headerFile.write(\"type: uchar\\n\")\r\n headerFile.write(\"dimension: 3\\n\")\r\n headerFile.write(\"space: left-posterior-superior\\n\")\r\n sizeX = imagePCRFile.X\r\n sizeY = imagePCRFile.Y\r\n sizeZ = imagePCRFile.Z\r\n headerFile.write(f\"sizes: {sizeX} {sizeY} {sizeZ}\\n\")\r\n volSpace = imagePCRFile.voxelSize\r\n headerFile.write(f\"space directions: ({volSpace}, 0.0, 0.0) (0.0, {volSpace}, 0.0) (0.0, 0.0, {volSpace})\\n\")\r\n headerFile.write(\"kinds: domain domain domain\\n\")\r\n headerFile.write(\"endian: little\\n\")\r\n headerFile.write(\"encoding: raw\\n\")\r\n headerFile.write(\"space origin: (0.0, 0.0, 0.0)\\n\")\r\n volPathName = filePathName + \".vol\"\r\n volPathSplit = []\r\n volPathSplit = volPathName.split('/')\r\n volFileName = volPathSplit[len(volPathSplit)-1]\r\n headerFile.write(f\"data file: {volFileName}\\n\")\r\n # print(imagePCRFile.form)\r\n print(f\".nhdr file path is: {nhdrPathName}\")\r\n #Automatically loading .vol file using the generated .nhdr file.\r\n if os.path.exists(volPathName):\r\n slicer.util.loadVolume(nhdrPathName)\r\n print(f\"{volFileName} loaded\\n\")\r\n else:\r\n print(f\"{volFileName} is not in the 
same directory\\n\")\r\n else:\r\n print(\"The format of this dataset is currently not supported by this module. Currently only float (format=10), unsigned 16 bit integer (format=5) and unsigned 8 bit integer (format=1) data types are supported. Please contact us with this dataset to enable this data type.\")\r\n else:\r\n print(\"This is not a PCR file, please re-select a PCR file\")", "def DrawHLine(pintX, pintY):\n for i in range(0, pintX):\n display.pixel(i, pintY, 1)", "def format_ocr_text(self, page):\n \n #read out of the text file that tesseract made\n ocr_text = open(self.ocr_text, 'r')\n \n # write into this file\n djvu_text = open( self.djvu_text, 'w' )\n \n text = \"(page 0 0 1 1\\n\"\n \n self.out_text.write('\\n## Page %d ###\\n\\n' % page )\n \n for line in ocr_text:\n \n #write to the human readable file\n self.out_text.write(line)\n \n # add each line of text\n # escaping \" to \\\" as we go\n text += '(line 0 0 1 1 \"%s\")\\n' % line.replace('\"', r'\\\"').strip()\n \n text += \")\\n\"\n \n djvu_text.write( text )\n \n ocr_text.close()\n djvu_text.close()", "def genH(self,fp):\n id = 0\n for nm in GetOsekObjects('NM'):\n if(self == nm):\n break\n else:\n id += 1\n fp.write('\\n#define %s %s\\n'%(self.name,id))\n fp.write('#define %s_TYPE NM_%s\\n'%(self.name,self.getValue('TYPE')))\n fp.write('#define %s_tTyp %s\\n'%(self.name,self.getValue('TTYP')))\n fp.write('#define %s_tMax %s\\n'%(self.name,self.getValue('TMAX')))\n fp.write('#define %s_tError %s\\n'%(self.name,self.getValue('TERROR')))\n fp.write('#define %s_tTx %s\\n'%(self.name,self.getValue('TTX')))\n fp.write('#define %s_IDBASE %s\\n'%(self.name,self.getValue('IDBASE')))\n fp.write('#define %s_WINDOWMASK %s\\n'%(self.name,self.getValue('WINDOWMASK')))\n fp.write('#define %s_CONTROLLER %s\\n'%(self.name,self.getValue('CONTROLLER')))", "def vp():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 10058400\r\n section.page_height = 7772400\r\n document.add_heading('Virtual Proxy details', 1)\r\n virtualproxy_metrics= ['description', 'prefix', 'authenticationModuleRedirectUri', 'sessionModuleBaseUri', 'loadBalancingModuleBaseUri', 'authenticationMethod', 'headerAuthenticationMode',\r\n 'headerAuthenticationHeaderName', 'headerAuthenticationStaticUserDirectory', 'headerAuthenticationDynamicUserDirectory', 'anonymousAccessMode', 'windowsAuthenticationEnabledDevicePattern',\r\n 'sessionCookieHeaderName', 'sessionCookieDomain', 'additionalResponseHeaders', 'sessionInactivityTimeout', 'extendedSecurityEnvironment', 'websocketCrossOriginWhiteList', 'defaultVirtualProxy',\r\n 'tags','samlMetadataIdP', 'samlHostUri', 'samlEntityId', 'samlAttributeUserId', 'samlAttributeUserDirectory', 'samlAttributeSigningAlgorithm', 'samlAttributeMap', 'magicLinkHostUri',\r\n 'magicLinkFriendlyName','jwtPublicKeyCertificate','jwtAttributeUserDirectory','jwtAttributeMap','jwtAttributeUserId']\r\n \r\n virtualproxynodes = get_qlik_sense.get_virtualproxy()\r\n num_of_virtualproxys = len(virtualproxynodes)\r\n num_of_virtualproxy_metrics = len(virtualproxy_metrics)\r\n table = document.add_table(rows=num_of_virtualproxy_metrics+1, cols=num_of_virtualproxys+1)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'Metric'\r\n for item in range(0, num_of_virtualproxys):\r\n row.cells[item+1].text = virtualproxynodes[item][29]\r\n for item in range(num_of_virtualproxy_metrics):\r\n 
row = table.rows[item+1]\r\n row.cells[0].text = str(virtualproxy_metrics[item])\r\n for virtualproxynode in range(num_of_virtualproxys):\r\n row.cells[virtualproxynode+1].text = str(virtualproxynodes[virtualproxynode][item])\r\n\r\n document.add_page_break()", "def __init__(self):\n inkex.Effect.__init__(self)\n\n self.doc_center = None\n self.normal_line = {\n 'stroke': '#000000', # black\n 'fill': 'none', # no fill - just a line\n 'stroke-width': '1' # can also be in form '2mm'\n }\n self.cut_line = {\n 'stroke': '#ff0000', # black\n 'fill': 'none', # no fill - just a line\n 'stroke-width': '0.1' # can also be in form '2mm'\n }\n self.doted_line = {\n 'stroke': '#000000', # black\n 'fill': 'none', # no fill - just a line\n 'stroke-width': '1', # can also be in form '2mm'\n 'stroke-linecap': 'butt',\n 'stroke-linejoin': 'miter',\n 'stroke-miterlimit': '10',\n 'stroke-dasharray': '9.883,9.883',\n 'stroke-dashoffset': '0'\n }\n\n # Define the list of parameters defined in the .inx file\n self.OptionParser.add_option(\"-t\", \"--type\", type=\"string\", dest=\"type\", default='perso',\n help=\"Type of template rendered\")\n self.OptionParser.add_option(\"-u\", \"--units\", type=\"string\", dest=\"units\", default='cm',\n help=\"User interface units\")\n self.OptionParser.add_option(\"--style\", type=\"string\", dest=\"style\", default='print',\n help=\"Style of the template\")\n self.OptionParser.add_option(\"-n\", \"--neck\", type=\"float\", dest=\"neck\", default=11,\n help=\"Width of the neck\")\n self.OptionParser.add_option(\"-s\", \"--shoulder\", type=\"float\", dest=\"shoulder\", default=44,\n help=\"Width shoulder to shoulder\")\n self.OptionParser.add_option(\"--hip\", type=\"float\", dest=\"hip\", default=89,\n help=\"Hip measurement\")\n self.OptionParser.add_option(\"-w\", \"--waist\", type=\"float\", dest=\"waist\", default=79,\n help=\"Waist measurement\")\n self.OptionParser.add_option(\"-c\", \"--chest\", type=\"float\", dest=\"chest\", default=97,\n help=\"Chest measurement\")\n self.OptionParser.add_option(\"--hsptochest\", type=\"float\", dest=\"hsp_chest\", default=21,\n help=\"Lenght HSP to chest\")\n self.OptionParser.add_option(\"--hsptowaist\", type=\"float\", dest=\"hsp_waist\", default=45,\n help=\"Lenght HSP to waist\")\n self.OptionParser.add_option(\"--hsptohip\", type=\"float\", dest=\"hsp_hip\", default=67,\n help=\"Lenght HSP to hip\")\n self.OptionParser.add_option(\"-b\", \"--bicep\", type=\"float\", dest=\"bicep\", default=23,\n help=\"Bicep measurement\")\n self.OptionParser.add_option(\"--upersleeve\", type=\"float\", dest=\"top_sleeve\", default=20,\n help=\"Top lenght of the sleeve\")\n self.OptionParser.add_option(\"--bottomsleeve\", type=\"float\", dest=\"bottom_sleeve\", default=17,\n help=\"Bottom lenght of the sleeve\")\n self.OptionParser.add_option(\"-e\", \"--ease\", type=\"float\", dest=\"ease\", default=5,\n help=\"Amount of ease\")\n self.OptionParser.add_option(\"--neck_front\", type=\"float\", dest=\"neck_front\", default=0,\n help=\"Height of the front neck drop\")\n self.OptionParser.add_option(\"--neck_rear\", type=\"float\", dest=\"neck_rear\", default=6,\n help=\"Height of the rear neck drop\")\n self.OptionParser.add_option(\"--shoulder_drop\", type=\"float\", dest=\"shoulder_drop\", default=3,\n help=\"height of the shoulder\")\n self.OptionParser.add_option(\"--grid\", type=\"inkbool\", dest=\"grid\", default=True,\n help=\"Display the Reference Grid \")\n self.OptionParser.add_option(\"--temp\", type=\"inkbool\", dest=\"temp\", 
default=True,\n help=\"Display the template\")\n self.OptionParser.add_option(\"--active-tab\", type=\"string\", dest=\"active_tab\",\n default='title', help=\"Active tab.\")", "def printing():\r\n document.add_heading('Printing Service details', 1)\r\n\r\n printing_metrics = ['customproperties',\r\n 'workingSetSizeHiPct',\r\n 'logVerbosityAuditActivity',\r\n 'logVerbosityService',\r\n 'hostname',\r\n 'tags']\r\n\r\n printnodes = get_qlik_sense.get_printing()\r\n num_of_nodes = len(printnodes)\r\n num_of_print_metrics = len(printing_metrics)\r\n table = document.add_table(rows=num_of_print_metrics+1, cols=num_of_nodes+1)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'Metric'\r\n for item in range(0, num_of_nodes):\r\n row.cells[item+1].text = printnodes[item][6]\r\n for item in range(num_of_print_metrics):\r\n row = table.rows[item+1]\r\n row.cells[0].text = str(printing_metrics[item])\r\n for printnode in range(num_of_nodes):\r\n row.cells[printnode+1].text = str(printnodes[printnode][item])\r\n\r\n document.add_page_break()", "def horizontal_line(t,n, h):\n lt(t)\n pu(t)\n fd(t,h)\n pd(t)\n lt(t)\n fd(t,n)\n rt(t)", "def _writeVariablesHeaderSection(self):\n self.header.write(wrapLine(\"NV\", self.annotation, self.delimiter, \"%d\\n\" % self.NV))\n self.header.write(wrapLine(\"VSCAL\", self.annotation, self.delimiter, ((\"%s\" + self.delimiter) * (self.NV - 1) + \"%s\\n\") % tuple(self.VSCAL)))\n self.header.write(wrapLine(\"VMISS\", self.annotation, self.delimiter, ((\"%s\" + self.delimiter) * (self.NV - 1) + \"%s\\n\") % tuple(self.VMISS)))\n self.header.write(wrapLines(\"VNAME\", self.annotation, self.delimiter, \"%s\\n\" * self.NV % tuple(self.VNAME)))", "def header(self) -> NoReturn:\n self.set_x(self.t_margin + self.b_margin)\n self.ln(self.line_height)", "def CreateBiPennate1():\r\n \r\n print('Opening Data...')\r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n print('Loading Finished \\n Rotating Vectors...')\r\n \r\n # Rotate Vectors\r\n RotVectors1 = np.zeros((Vectors1.shape[0],3))\r\n\r\n idxpos = np.argwhere(Centroids1[:,1] >= 0)\r\n idxpos = idxpos.flatten()\r\n idxneg = np.argwhere(Centroids1[:,1] < 0)\r\n idxneg = idxneg.flatten()\r\n\r\n PosVectors = RotationTransform(Vectors1[idxpos,:],degZ = 30)\r\n NegVectors = RotationTransform(Vectors1[idxneg,:],degZ = -30)\r\n RotVectors1[idxpos,:] = PosVectors[:,:]\r\n RotVectors1[idxneg,:] = NegVectors[:,:]\r\n print('Vectors Rotated \\n Inserting Plane...')\r\n \r\n # Create Plane of vectors through centreline.\r\n PlaneCentroids,PlaneVectors = InsertPlane(Centroids1,RotVectors1,50,4)\r\n print('Plane Inserted \\n Interpolating Centroids...')\r\n \r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(PlaneCentroids,PlaneVectors,Centroids2)\r\n # Make the data more sparse to display better.\r\n C1,V1 = SparseData(PlaneCentroids,PlaneVectors,1)\r\n C2,V2 = SparseData(Centroids2,Vectors2,1)\r\n print('Interpolation Finished \\n Plotting...')\r\n \r\n # Plot Data\r\n fig = 
plt.figure()\r\n\r\n ax1 = fig.add_subplot(121,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,5,10)\r\n\r\n ax2 = fig.add_subplot(122,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,5,10)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"New Centroid Vectors\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/BiPennateCentralPlaneFibres30.dat\",Vectors2,header = header,comments='')", "def __print_logo(self, left=5, top=2, bottom=2):\n\n print('\\n' * top, end=\"\")\n\n for line in self.__logo:\n print((\" \" * left) + line, end=\"\")\n\n print('\\n' * bottom, end=\"\")", "def main():\n try:\n\n OUTPUTOVERVIEW.write(r'\\documentclass[12pt,a4paper,twocolumn]{article}'+'\\n\\n'\\\n r'\\usepackage[utf8x]{inputenc}'+'\\n'\\\n r'\\usepackage{graphicx}'+'\\n'\\\n r'\\usepackage{tikz}'+'\\n'\\\n r'\\usepackage[left=2.5cm, right=1cm, top=1.5cm, bottom=2cm]{geometry}'+'\\n'\\\n r'\\usepackage{xcolor}'+'\\n'\\\n r'\\usepackage{siunitx}'+'\\n'\\\n r'\\usepackage{titlesec}'+'\\n'\\\n r'\\titleformat{\\section}{\\Large\\scshape}{\\thesection}{1em}{}'+'\\n'\\\n r'\\titlespacing{\\section}{0pt}{12pt plus 4pt minus 2pt}{0pt plus 2pt minus 2pt}'+'\\n'\\\n r'\\setlength{\\parindent}{0pt}'+'\\n'\\\n r'\\usepackage{LatexColors.incl}'+'\\n'\\\n r'\\begin{document}'+'\\n' + '\\n')\n\n startletter = ''\n for strline in COLORLINES[1:]:\n\n if strline.strip():\n # get color name and hex\n colname = colorname(strline)\n\n if startletter != strline[:1]:\n startletter = strline[:1]\n OUTPUTOVERVIEW.write(r'\\section*{' + startletter +'}\\n')\n\n # get RBG\n rcol, gcol, bcol = tuple(int(colname[1][i:i+2], 16) for i in (0, 2, 4))\n\n # \\definecolor{airforceblue}{HTML}{5d8aa8}\n clname = strip_accents(re.sub(BAD_CHARS_NAME, '',\\\n colname[2], 0, re.MULTILINE | re.IGNORECASE)).title()\n\n rcol = rcol/255.\n gcol = gcol/255.\n bcol = bcol/255.\n\n cmyk = convert_rgb_cmyk(rcol, gcol, bcol)\n hsv = convert_rgb_hsv(rcol, gcol, bcol)\n hsl = convert_rgb_hsl(rcol, gcol, bcol)\n\n OUTPUTOVERVIEW.write(r'\\begin{minipage}{\\linewidth}\\tikz[baseline=1mm]\\draw [fill='\\\n + colname[0] + r', rounded corners=5pt] (0,0) rectangle (2cm,1cm); {\\textbf{'\\\n + clname + r'} \\\\ \\scriptsize{'+'RGB: {0:.0f}, {1:.0f}, {2:.0f}'\\\n .format(*tuple(int(colname[1][i:i+2], 16) for i in (0, 2, 4))) + r'; ' + \\\n r'HEX:~\\#' + colname[1] + r'\\\\' + \\\n r'CMYK: \\SI{{{0:.1f}}}{{\\percent}}, \\SI{{{1:.1f}}}{{\\percent}}, '\n r'\\SI{{{2:.1f}}}{{\\percent}}, \\SI{{{3:.1f}}}{{\\percent}}'\\\n .format(cmyk[0]*100, cmyk[1]*100, cmyk[2]*100, cmyk[3]*100) + r' \\\\' + \\\n r'HSV: \\SI{{{0:.0f}}}{{\\degree}}, \\SI{{{1:.1f}}}{{\\percent}}, '\n r'\\SI{{{2:.1f}}}{{\\percent}}'\\\n .format(hsv[0], hsv[1]*100, hsv[2]*100) + r' \\\\' + \\\n r'HSL: \\SI{{{0:.0f}}}{{\\degree}}, \\SI{{{1:.1f}}}{{\\percent}}, '\n r'\\SI{{{2:.1f}}}{{\\percent}}'\\\n .format(hsl[0], hsl[1]*100, hsl[2]*100)\\\n + '}}\\n'\\\n r'\\vspace{.5em}\\end{minipage}' + '\\n')\n\n OUTPUTOVERVIEW.write(r'\\end{document}')\n\n except OSError as err:\n print(\"OS error: {0}\".format(err))\n # except Exception as ex: #comment for pylint 10.0!\n # print(str(ex))\n else:\n print('Overview file written.')\n OUTPUTOVERVIEW.close()", "def draw(self):\n # 5 is the number of characters per box add one for the header column\n sepreator_line = \"-\" * (len(self.letters) + 1) * 5 + \"-\"\n print(sepreator_line)\n print(\n \"| \" 
+ \"\".join([f\"| {letter} \" for letter in self.letters]) + \"|\")\n print(sepreator_line)\n for number in self.numbers:\n print(f\"| {number} \" + \"\".join(\n [f\"| {self.positions[letter + number]} \" for letter in self.letters]) + \"|\")\n print(sepreator_line)", "def lines():\n line_dict = {}\n #\n line_dict['ArI'] = 2**0\n line_dict['HgI'] = 2**1\n line_dict['KrI'] = 2**2\n line_dict['NeI'] = 2**3\n line_dict['XeI'] = 2**4\n line_dict['CdI'] = 2**5\n line_dict['ZnI'] = 2**6\n line_dict['HeI'] = 2**7\n line_dict['OH_R24000'] = 2**8\n line_dict['OH_triplespec'] = 2**9\n line_dict['CuI'] = 2**10\n line_dict['ArII'] = 2**11\n line_dict['OH_XSHOOTER'] = 2**12\n line_dict['OH_GNIRS'] = 2**13\n line_dict['OH_NIRES'] = 2**14\n line_dict['ThAr_XSHOOTER_VIS'] = 2**15\n line_dict['OH_GMOS'] = 2**16\n line_dict['OH_MODS'] = 2**17\n line_dict['ThAr_MagE'] = 2**18 # R=4100\n line_dict['OH_FIRE_Echelle'] = 2**19 # R=6000\n line_dict['Ar_IR_GNIRS'] = 2**20 # R=6000\n line_dict['FeI'] = 2**21\n line_dict['FeII'] = 2**22\n line_dict['UNKNWN'] = 2**23\n line_dict['Ar_IR_MOSFIRE'] = 2 ** 24\n line_dict['Ne_IR_MOSFIRE'] = 2 ** 25\n line_dict['OH_MOSFIRE_Y'] = 2 ** 26\n line_dict['OH_MOSFIRE_J'] = 2 ** 27\n line_dict['OH_MOSFIRE_H'] = 2 ** 28\n line_dict['OH_MOSFIRE_K'] = 2 ** 29\n line_dict['ThAr_XSHOOTER_UVB'] = 2**30\n #\n return line_dict", "def page(self, lines, dpi):\r\n self.__setdpi(dpi)\r\n rows = len(lines)\r\n page = self.__newpage(rows)\r\n row = 0\r\n for line in lines:\r\n if (self.__debug): print(row, line, len(line))\r\n elongated = False\r\n y = row * self.__height + self.__margin\r\n column = 0\r\n for char in line:\r\n x = column % self.__columns * self.__width + self.__margin\r\n c = ord(char)\r\n if (char == self.__elongate):\r\n elongated = True\r\n continue\r\n if (c > 127 and c < 255): c = c & 0b10001111\r\n i = c - ord(' ')\r\n if (i > 0 and i < len(self.__chars)):\r\n clip = self.__chars[i]\r\n mask = self.__masks[i]\r\n clip_width = self.__width\r\n if (elongated):\r\n clip_width *= 2\r\n clip = clip.resize((clip_width, self.__height))\r\n mask = mask.resize((clip_width, self.__height))\r\n box = (x, y, x + clip_width, y + self.__height)\r\n page.paste(clip, box, mask)\r\n if (elongated):\r\n column += 2\r\n elongated = False\r\n else:\r\n column += 1\r\n row += 1\r\n return page" ]
[ "0.5855003", "0.56978124", "0.5614265", "0.55788046", "0.55214256", "0.55017865", "0.53626406", "0.5358919", "0.53464925", "0.5337525", "0.5325445", "0.53254324", "0.5279234", "0.5271747", "0.5269328", "0.5254424", "0.5251807", "0.52497333", "0.5235495", "0.52344364", "0.5231171", "0.5206225", "0.5204333", "0.5196694", "0.5179811", "0.51669526", "0.5163135", "0.51492804", "0.51492465", "0.51414907" ]
0.7059355
0
Frame routine, including main pipeline, triplet buildup, and trajectory pipeline.
def run_frame(self, image): self.frame_idx += 1 # run main pipeline t0 = datetime.now() disp = self.main_pipeline(image) t1 = datetime.now() logging.info('main pipeline: {}'.format(get_tdiff(t0, t1))) # prepare image sequence of 3 for trajectory pipeline t0 = datetime.now() self.image_seq.append(image) if len(self.image_seq) > 3: del self.image_seq[0] t1 = datetime.now() logging.info('image stack: {}'.format(get_tdiff(t0, t1))) # run trajectory pipeline t0 = datetime.now() if len(self.image_seq) >= 3: self.egomo_trmat = self.traj_pipeline(prev_trmat=self.egomo_trmat) t1 = datetime.now() logging.info('traj pipeline: {}'.format(get_tdiff(t0, t1))) return self.frame_idx, disp, self.egomo_trmat, self.t_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_pipeline(frame, keep_state=True):\n\n global line_lt, line_rt, processed_frames\n\n # undistort the image using coefficients found in calibration\n undistorted_img = undistort(frame, mtx, dist)\n\n # binarize the frame and highlight lane lines\n binarized_img = binarize(undistorted_img)\n\n # perspective transform to obtain bird's eye view\n birdeye_img, matrix, inversed_matrix = birdeye(binarized_img, visualise=False)\n\n # 2 order polynomial curve fit onto lane lines found\n if processed_frames > 0 and keep_state and line_lt.detected and line_rt.detected:\n find_lane_by_previous_fits(birdeye_img, line_lt, line_rt, visualise=False)\n else:\n find_lane_by_sliding_windows(birdeye_img, line_lt, line_rt, n_windows=9, visualise=False)\n\n # compute offset in meter from center of the lane\n offset_meter = offset_from_lane_center(line_lt, line_rt, frame_width=frame.shape[1])\n\n # draw the surface enclosed by lane lines back onto the original frame\n blend_on_road = draw_back_onto_the_road(undistorted_img, inversed_matrix, line_lt, line_rt, keep_state)\n mean_curvature_meter = np.mean([line_lt.curvature_meter, line_rt.curvature_meter])\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(blend_on_road, 'Curvature radius: {:.02f}m'.format(mean_curvature_meter), (60, 60), font, 1,\n (255, 255, 255), 2)\n cv2.putText(blend_on_road, 'Offset from center: {:.02f}m'.format(offset_meter), (60, 90), font, 1,\n (255, 255, 255), 2)\n\n processed_frames += 1\n\n return blend_on_road", "def run_frame(self, ti, img):\n pass", "def frameProcessing():\n\tglobal referenceFrame\n\tglobal dilatedFrame\n\t#receive the image from the request.\n\tfile = request.json\n\tframe = np.array(file[\"Frame\"], dtype = \"uint8\")\n\t\n\t#gray-scale conversion and Gaussian blur filter applying\n\tgrayFrame = greyScaleConversion(frame)\n\tblurredFrame = gaussianBlurring(grayFrame)\n\n\t#Check if a frame has been previously processed and set it as the previous frame.\n\tif type(referenceFrame) ==int():\n\t\treferenceFrame = blurredFrame\n\t\n\t#Background subtraction and image binarization\n\tframeDelta = getImageDiff(referenceFrame, blurredFrame)\n\treferenceFrame = blurredFrame\n\t#cv2.imwrite(\"previousImage.jpg\", blurredFrame)\n\tframeThresh = thresholdImage(frameDelta, binarizationThreshold)\n\n\t#Dilate image and find all the contours\n\tdilatedFrame = dilateImage(frameThresh)\n\t#cv2.imwrite(\"dilatedFrame.jpg\", dilatedFrame)\n\tcnts = getContours(dilatedFrame.copy())\n\t\n\theight = np.size(frame,0)\n\tcoordYEntranceLine = int((height / 2)-offsetEntranceLine)\n\tcoordYExitLine = int((height / 2)+offsetExitLine)\n\theaders = {\"enctype\" : \"multipart/form-data\"}\n\tr = requests.post(\"http://\" + getNextServer() + \"/objectClassifier\", headers = headers, json = {\"Frame\":frame.tolist()} )\n\t\"\"\"\t\n\tfor c in cnts:\n\t\tprint(\"x\")\n\t\tif cv2.contourArea(c) < minContourArea:\n\t\t\tprint(\"Small Area\", cv2.contourArea(c))\n\t\t\tcontinue\n\t\t(x, y, w, h) = getContourBound(c)\n\t\t#grab an area 2 times larger than the contour.\n\t\tcntImage = frame[y:y+int(1.5*w), x:x+int(1.5*h)]\n\t\tobjectCentroid = getContourCentroid(x, y, w, h)\n\t\tcoordYCentroid = (y+y+h)/2\n\t\t\n\t\t\n\t\t#if (checkEntranceLineCrossing(coordYCentroid,coordYEntranceLine,coordYExitLine)):\t\t\t\t\n\t\theaders = {\"enctype\" : \"multipart/form-data\"}\n\t\t#i = random.randint(1,1000)\n\t\t#cv2.imwrite(\"ContourImages/contour\"+str(i)+\".jpg\", cntImage)\n\t\t#files = {\"image\":open(\"ContourImages/contour\"+str(i)+\".jpg\", 
\"rb\")}\n\t\tdata = {\"contour\" : cntImage.tolist()}\n\t\tr = requests.post(\"http://\" + getNextServer() + \"/objectClassifier\", headers = headers, json = data )\n\n\t\n\t\"\"\"\n\treturn Response(status=200)", "def __init__(self, camIdx, dropFrames = False, classify_behavior = False, behavior_model_path = None,\n implement_control = False):\n\n\n self.classify_behavior = classify_behavior\n self.implement_control = implement_control\n\n if classify_behavior:\n if behavior_model_path == None:\n raise Exception(\"No behavior model path defined\")\n else:\n self.behaviorModel = pickle.load(open(behavior_model_path, 'rb'))\n\n #Storage structure for frames, Thinking can sequentially add frames for multiple cameras\n self.queue = Queue() #Temporary frame storage, only used for retrieval\n self.stopped = False #Boolean stop for threaded frame retrieval\n self.curFrames = [] #Current Frame or Batch of Frames pointer\n self.dropFrames = dropFrames #Flag to skip frames if processing speed is lower than\n #stream speed\n self.frameCounter = 0 #Number of total frames that have been retrieved from the stream\n self.curIdx = 0 #The index of the current frame to process\n #We have two references for indexing: frameCounter and curIdx. When Dropframes is true,\n #frameCounter=curIdx as the current frame will be the most recently pulled frame. When\n #dropFrames is false, frameCounter >= curIdx as there can be a backlog of frames to process\n #and the current frame to process is not the most recently pulled frame\n self.width = None\n self.height = None\n self.meta = ordDict({\n 'Frame Idx':[],\n 'Frame Read':[], #Whether or not the frame was actually read, this can only be false\n #if dropFrames flag is set to True\n 'Time Retrieved':[],\n 'Time Read':[],\n 'Time Processed':[],\n 'Time Displayed':[],\n 'Processing Time':[],\n 'Displaying Time':[],\n 'Total Time':[]\n })\n\n #If input for stream is an int, will convert into a list, might build in funciton to check\n #all input types. 
ie if one string is passed in\n if isinstance(camIdx, int):\n camIdx = [camIdx]\n\n #Curframe will be set back to it initalization if a frame is read or replaced by the\n #following frame if it had not yet been read; self.frame will only be replaced by the next read frame\n if isinstance(camIdx, list):\n self.streams = [] #input streams\n self.poseData = []\n self.frame = [] #most recently retrieved frame\n self.streamCount = len(camIdx) #number of streams\n self.multiple = self.streamCount > 1\n self.camIdx = camIdx\n self.behaviors = []\n\n #self.updateMeta()\n\n elif camIdx == None:\n print(\"Initalizing empty Stream Handler\")\n else:\n raise Exception('Invalid input for camIdx')", "def main_loop():\n\n editor = 'FrameEditorEmpty'\n merge = 'FrameMergerFirst'\n editorparams = ''\n mergerparams = ''\n framesrcparams = 'localhost:5005'\n framedstparams = 'localhost:5005'\n framesource = 'CameraFrameGenerator'\n framesdestination = 'FrameSinkShower'\n\n if '-framesource' in sys.argv:\n framesource = sys.argv[sys.argv.index('-framesource') + 1]\n if len(sys.argv) > sys.argv.index('-framesource') + 2 \\\n and sys.argv[sys.argv.index('-framesource') + 2][0] != '-':\n framesrcparams = sys.argv[sys.argv.index('-framesource') + 2]\n\n if '-framedestination' in sys.argv:\n framesdestination = sys.argv[sys.argv.index('-framedestination') + 1]\n if len(sys.argv) > sys.argv.index('-framedestination') + 2 \\\n and sys.argv[sys.argv.index('-framedestination') + 2][0] != '-':\n framedstparams = sys.argv[sys.argv.index('-framedestination') + 2]\n\n if '-editor' in sys.argv:\n editor = sys.argv[sys.argv.index('-editor') + 1]\n\n if '-merge' in sys.argv:\n merge = sys.argv[sys.argv.index('-merge') + 1]\n\n if '-editorparams' in sys.argv:\n editorparams = sys.argv[sys.argv.index('-editorparams') + 1]\n\n if '-mergerparams' in sys.argv:\n mergerparams = sys.argv[sys.argv.index('-mergerparams') + 1]\n\n # print \"From %s:%s to %s:%s, edit by %s\" % (fromhost, _from, tohost, _to, editor)\n\n frameEditor = eval(editor)(editorparams)\n frameMerger = eval(merge)(mergerparams)\n framesSrc = eval(framesource)(framesrcparams)\n framesDst = eval(framesdestination)(framedstparams)\n\n receive_and_sink_video(framesSrc=framesSrc, framesDst=framesDst, frameEditor=frameEditor, frameMerger=frameMerger)", "def process_frame(self, frame):\n\t\treturn frame", "def update_frame(self, frame):\n\n t = datetime.now()\n delta_t = t - self.dpar.frame_timestamp[0]\n fps = self.dpar.update_fps(1./delta_t.total_seconds())\n\n self.dpar.frame_timestamp[0] = t\n\n if self.config.black_correct:\n cframe = self.ffc.black_correct(frame)\n else:\n cframe = frame\n\n self.dpar.latest_frame = np.copy(cframe)\n \n if self.dpar.cap_live_swap:\n pix, gray = self._get_pixmap(cframe[::4,::4], self.dpar.iwindow[0])\n self.cap_screen.cap_title = self._live_title(fps)\n self.cap_screen.setPixmap(pix)\n else: \n pix, gray = self._get_pixmap(cframe, self.dpar.iwindow[0])\n self.live_screen.live_title = self._live_title(fps)\n self.live_screen.setPixmap(pix)\n\n self.draw_histogram()\n\n\n if self.recording_sequence:\n\n # MRP ToDo update these tags properly.\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n ifi_ms = 1000. 
/ self.camera.actual_frame_rate\n ts_ms = np.int(np.round(ifi_ms * self.seq_frame_num))\n\n self.ifd.update_tags((self.seq_frame_num, 0), et, 0, ts_ms, 99)\n\n cap_image = np.copy(self.dpar.latest_frame).astype(np.uint16)\n #cv2.imwrite(cfn, (cap_image << (16 - self.camera.pixel_bits)).astype(np.uint16))\n\n \"\"\"\n Perform the TIFF windowing and then rebinning (compress) according to config file options\n \"\"\"\n x0 = max(0, (cap_image.shape[1] - config.tiff_seq_x_window) // 2)\n x1 = cap_image.shape[1] - x0\n y0 = max(0, (cap_image.shape[0] - config.tiff_seq_y_window) // 2)\n y1 = cap_image.shape[0] - y0\n cap_image = cap_image[y0:y1, x0:x1]\n\n shift_bits = 16 - self.camera.pixel_bits\n if config.tiff_seq_rebin > 1: # not tested for r ne 2\n r = config.tiff_seq_rebin\n cap_image = cap_image.reshape((cap_image.shape[0] // r, r, cap_image.shape[1] // r, -1)).sum(axis=3).sum(axis=1)\n extra_bits = 2 * (r.bit_length() -1)\n shift_bits = max(0, shift_bits - extra_bits)\n\n\n #im = PIL.Image.fromarray(gray)\n im = PIL.Image.fromarray((cap_image << shift_bits).astype(np.uint16))\n\n im.save(self.tiff_out, tiffinfo=self.ifd, compression=TIFF_COMPRESSION)\n self.tiff_out.newFrame()\n self.seq_frame_num += 1\n self.seq_frame_label.setText(str(self.seq_frame_num))\n\n if self.recording_video:\n # cframe is int16\n #f8 = ((cframe >> (self.camera.pixel_bits - 8)) & 0xff).astype(np.uint8)\n #Style 1:\n #fc = np.stack((f8, f8, f8), axis=-1)\n #self.rv_vout.write(fc)\n #Style 2&3:\n self.rv_vout.write(gray)\n self.recorded_video_frame_number += 1\n #Style 4: (16-bit)\n #self.rv_vout.write(cframe)\n\n #if self.recorded_video_frame_number == 20:\n # self.record_video() # turn off", "def main_pipeline(self, image):\n # detection\n t0 = datetime.now()\n bbox_list, score_list, label_list = self.det.inference(image)\n t1 = datetime.now()\n logging.info('main pipeline (det): {}'.format(get_tdiff(t0, t1)))\n \n # estimation\n t0 = datetime.now()\n disp = self.est.inference(image)\n depth_list = self.est.calc_depth(bbox_list)\n t1 = datetime.now()\n logging.info('main pipeline (est): {}'.format(get_tdiff(t0, t1)))\n \n # tracker predict\n t0 = datetime.now()\n for t in self.t_list:\n t.predict()\n t1 = datetime.now()\n logging.info('main pipeline (trk_pred): {}'.format(get_tdiff(t0, t1)))\n \n # associate\n t0 = datetime.now()\n matched_pair, unmatched_bbox_list, _ = associate(bbox_list, label_list, self.t_list)\n t1 = datetime.now()\n logging.info('main pipeline (da_solver): {}'.format(get_tdiff(t0, t1)))\n \n t0 = datetime.now()\n # update trackers for matched_pair\n for m in matched_pair:\n t = self.t_list[m[1]]\n bbox = bbox_list[m[0]]\n depth = depth_list[m[0]]\n est_dict = {\n 'label': label_list[m[0]],\n 'score': score_list[m[0]]}\n t.update(self.frame_idx, bbox, depth, est_dict)\n \n # update in-track status of all trackers\n for t in self.t_list:\n t.update_status(self.frame_idx)\n \n # purge out dead trackers\n self.t_list = [t for t in self.t_list if t.get_status()]\n\n # create new trackers for unmatched_bbox_list\n for b_idx in unmatched_bbox_list:\n bbox = bbox_list[b_idx]\n depth = depth_list[b_idx]\n est_dict = {\n 'label': label_list[b_idx],\n 'score': score_list[b_idx]}\n self.t_list.append(tracker(self.t_cfg, self.tid_new, bbox, depth, est_dict))\n self.tid_new += 1\n\n t1 = datetime.now()\n logging.info('main pipeline (trk_upd): {}'.format(get_tdiff(t0, t1)))\n\n # disparity map for display\n return disp", "def anim_produce_frame(up_to_line, *fargs):\n #unpack *fargs\n 
axes,running_reward_exists,running_loss_exists,actions_exists,\\\n running_reward_file,running_loss_file,actions_file,actions_to_plot, \\\n actions_per_log,is_tri,actions_ylim = fargs\n #produce the plots for the current frame\n axis_ind = 0\n if running_reward_exists:\n axes[axis_ind].clear()\n plot_running_reward_on_axis(running_reward_file, axes[axis_ind], up_to_line)\n axis_ind += 1\n if running_loss_exists:\n axes[axis_ind].clear()\n axes[axis_ind+1].clear()\n plot_running_loss_on_axis(running_loss_file, axes[axis_ind],axes[axis_ind+1], up_to_line)\n axis_ind += 2\n if actions_exists:\n axes[axis_ind].clear()\n plot_actions_on_axis(actions_file,axes[axis_ind],is_tri,actions_to_plot=actions_to_plot,\n plot_to_file_line=int(up_to_line*actions_per_log),\n actions_ylim=actions_ylim)", "def calculate_frame(self):\n frame = self.stream.read()\n self.keypoints, self.image = self.openpose.forward(frame, True)", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def builddataframe(brick, path = \"..\", cutstring = \"1\", major = 0, minor = 0, newzprojection = None, charmsim = False):\n nplate =0\n\n print(\"Reading ScanSet at path \",path)\n\n #reading scanset\n sproc = r.EdbScanProc()\n sproc.eProcDirClient=path\n id = r.EdbID(brick,nplate,major,minor)\n ss = sproc.ReadScanSet(id)\n ss.Brick().SetID(brick)\n \n #preparing patterns\n npl = ss.eIDS.GetEntries()\n\n cut = r.TCut(cutstring)\n\n #intial empty arrays\n IDall = np.zeros(0,dtype=int)\n PIDall = np.zeros(0,dtype=int)\n\n xall = np.zeros(0,dtype=np.float32)\n yall = np.zeros(0,dtype=np.float32)\n zall = np.zeros(0,dtype=np.float32)\n TXall = np.zeros(0,dtype=np.float32)\n TYall = np.zeros(0,dtype=np.float32)\n\n MCEvtall = np.zeros(0,dtype=int)\n MCTrackall = np.zeros(0,dtype=int)\n Pall = np.zeros(0,dtype=np.float32)\n Flagall = np.zeros(0,dtype=int)\n\n print (\"Cut on couples \")\n cut.Print()\n\n print(\"Try to open folders at path \",path+\"/b00000\"+str(brick))\n for i in range(npl):\n idplate = ss.GetID(i)\n \n nplate = idplate.ePlate\n plate = ss.GetPlate(idplate.ePlate)\n #read pattern information\n p = r.EdbPattern()\n\n ect = r.EdbCouplesTree()\n if (nplate) <10:\n ect.InitCouplesTree(\"couples\",path+\"/b00000\"+str(brick)+\"/p00{}/{}.{}.{}.{}.cp.root\".format(nplate,brick,nplate,major,minor),\"READ\")\n else:\n ect.InitCouplesTree(\"couples\",path+\"/b00000\"+str(brick)+\"/p0{}/{}.{}.{}.{}.cp.root\".format(nplate,brick,nplate,major,minor),\"READ\")\n\n #addingcut\n ect.eCut = cut \n cutlist = ect.InitCutList()\n \n nsegcut = cutlist.GetN()\n nseg = ect.eTree.GetEntries()\n\n IDarray_plate = np.zeros(nsegcut,dtype=int)\n PIDarray_plate = np.zeros(nsegcut,dtype=int)\n\n xarray_plate = np.zeros(nsegcut,dtype=np.float32)\n yarray_plate = np.zeros(nsegcut,dtype=np.float32)\n zarray_plate = np.zeros(nsegcut,dtype=np.float32)\n TXarray_plate = np.zeros(nsegcut,dtype=np.float32)\n TYarray_plate = np.zeros(nsegcut,dtype=np.float32)\n \n MCEvtarray_plate = np.zeros(nsegcut,dtype=int)\n MCTrackarray_plate = np.zeros(nsegcut,dtype=int)\n Parray_plate = np.zeros(nsegcut,dtype=np.float32)\n Flagarray_plate = np.zeros(nsegcut,dtype=int)\n\n print 
(\"loop on {} segments over {} for plate {}\".format(nsegcut, nseg,nplate))\n for ientry in range(nsegcut):\n iseg = cutlist.GetEntry(ientry)\n ect.GetEntry(iseg)\n \n seg=ect.eS\n #//setting z and affine transformation\n seg.SetZ(plate.Z())\n seg.SetPID(i)\n seg.Transform(plate.GetAffineXY())\n\n if(newzprojection is not None):\n seg.PropagateTo(newzprojection[i])\n\n IDarray_plate[ientry] = seg.ID()\n PIDarray_plate[ientry] = seg.PID()\n \n xarray_plate[ientry] = seg.X()\n yarray_plate[ientry] = seg.Y()\n zarray_plate[ientry] = seg.Z()\n TXarray_plate[ientry] = seg.TX()\n TYarray_plate[ientry] = seg.TY()\n\n MCEvtarray_plate[ientry] = seg.MCEvt()\n MCTrackarray_plate[ientry] = seg.MCTrack()\n Parray_plate[ientry] = seg.P() \n if charmsim: #different place where pdgcode is stored\n Flagarray_plate[ientry] = seg.Vid(0)\n else:\n Flagarray_plate[ientry] = seg.Flag() \n\n #end of loop, storing them in global arrays\n IDall = np.concatenate((IDall,IDarray_plate),axis=0)\n PIDall = np.concatenate((PIDall,PIDarray_plate),axis=0)\n\n xall = np.concatenate((xall,xarray_plate),axis=0)\n yall = np.concatenate((yall,yarray_plate),axis=0)\n zall = np.concatenate((zall,zarray_plate),axis=0)\n TXall = np.concatenate((TXall,TXarray_plate),axis=0)\n TYall = np.concatenate((TYall,TYarray_plate),axis=0)\n MCEvtall = np.concatenate((MCEvtall,MCEvtarray_plate),axis=0)\n MCTrackall = np.concatenate((MCTrackall,MCTrackarray_plate),axis=0)\n Pall = np.concatenate((Pall,Parray_plate),axis=0)\n Flagall = np.concatenate((Flagall,Flagarray_plate),axis=0)\n\n data = {'ID':IDall,'PID':PIDall,'x':xall,'y':yall,'z':zall,'TX':TXall,'TY':TYall,'MCEvent':MCEvtall,'MCTrack':MCTrackall,'P':Pall,'Flag':Flagall}\n df = pd.DataFrame(data, columns = ['ID','PID','x','y','z','TX','TY','MCEvent','MCTrack','P','Flag'] )\n\n return df", "def process(self):\n frame_count = 0\n size = self.frame.size\n while True:\n try:\n for i in range(parallel.BUFFER_LENGTH):\n offset = i * size;\n self.manager.image[offset : offset + size] = self.frame.ravel()\n self.ret, self.frame = self.capture.read()\n if not self.ret:\n self.clear_buffer(offset=offset + size + 1)\n raise StopIteration\n if DEBUG_LEVEL > 2:\n cv.imshow(self.name, self.frame)\n frame_count += 1\n key = cv.waitKey(self.toggle)\n if key is 27:\n raise StopIteration\n return\n elif key is 32:\n self.toggle ^= 1\n self.manager.detect()\n self.barrier.wait()\n except StopIteration:\n # Handle dangling frames in buffer and return gracefully\n self.manager.detect()\n self.barrier.wait()\n self.cleanup()\n try:\n # Handle rangequits in Phase 1\n for rv in self.variables:\n for event in rv['events']:\n if event['event_subtype'] == \"Finish\":\n return self.variables\n return None\n except:\n # Phase 0 -- no handling\n return self.variables\n except:\n # Any other exception is bad!\n return None", "def __init__(self, frame):\n self.frame = frame", "def process_frames(self, data):\n pass", "def process_frames(self, data):\n pass", "def process_frames(self, data):\n pass", "def process_frames(self, data):\n pass", "def process_frames(self, data):\n pass", "def extract_frames(pipe, \r\n cfg, \r\n save_path,\r\n post_processing=False,\r\n save_colorize=True,\r\n save_pc=False,\r\n visualize=True):\r\n # Configurations\r\n if save_colorize:\r\n colorizer = rs.colorizer()\r\n if save_pc:\r\n pc = rs.pointcloud()\r\n points = rs.points()\r\n # Save path\r\n if not os.path.exists(save_path):\r\n os.makedirs(save_path)\r\n\r\n # Start the pipe\r\n i = 0\r\n profile = pipe.start(cfg)\r\n device = 
profile.get_device()\r\n playback = device.as_playback()\r\n playback.set_real_time(False) # Make sure this is False or frames get dropped\r\n while True:\r\n try:\r\n # Wait for a conherent pairs of frames: (rgb, depth)\r\n pairs = pipe.wait_for_frames()\r\n\r\n # Align depth image to rgb image first\r\n align = rs.align(rs.stream.color)\r\n pairs = align.process(pairs)\r\n\r\n color_frame = pairs.get_color_frame()\r\n depth_frame = pairs.get_depth_frame()\r\n if not depth_frame or not color_frame:\r\n continue\r\n\r\n # Post-processing\r\n if post_processing:\r\n depth_frame = post_processing(depth_frame)\r\n\r\n # Get rgb-d images\r\n color_img = np.asanyarray(color_frame.get_data())\r\n color_img = cv2.cvtColor(color_img, cv2.COLOR_RGB2BGR)\r\n depth_img = np.asanyarray(depth_frame.get_data())\r\n print('Frame {}, Depth Image {}, Color Image {}'.format(i+1, depth_img.shape, color_img.shape))\r\n \r\n # Save as loseless formats\r\n cv2.imwrite(os.path.join(save_path, '{}_rgb.png'.format(i)), color_img, [cv2.IMWRITE_PNG_COMPRESSION, 0])\r\n np.save(os.path.join(save_path, '{}_depth.npy'.format(i)), depth_img)\r\n \r\n if save_colorize:\r\n # Save colorized depth map\r\n depth_img_colorized = np.asanyarray(colorizer.colorize(depth_frame).get_data())\r\n cv2.imwrite(os.path.join(save_path, '{}_depth_colorized.jpg'.format(i)), depth_img_colorized) # No need for lossless here\r\n \r\n if save_pc:\r\n # NOTE: Point cloud calculation takes longer time.\r\n pc.map_to(color_frame)\r\n points = pc.calculate(depth_frame)\r\n points.export_to_ply(os.path.join(save_path, '{}_pc.ply'.format(i)), color_frame)\r\n \r\n i += 1\r\n\r\n if visualize:\r\n # Stack both images horizontally\r\n images = np.vstack((color_img, depth_img_colorized))\r\n\r\n # Show images\r\n cv2.namedWindow('RealSense', cv2.WINDOW_NORMAL)\r\n cv2.imshow('RealSense', images)\r\n cv2.waitKey(1)\r\n \r\n except Exception as e:\r\n print(e)\r\n break\r\n\r\n # Clean pipeline\r\n pipe.stop()\r\n print('{} frames saved in total.'.format(i))\r\n\r\n return", "def run_pipeline(directory):\n\n # io = IO(path)\n # df = io.load_cleaned_file(download_always=False)\n # df = add_choke_events(df)\n\n # Add calls to features.Xxx here\n\n #directory = main_directory\n site=os.listdir(directory)\n site_dicom={}\n site_dicom_sub={}\n site_sub_files={}\n i,k,j=0,0,0\n for filename in site:\n site_dicom[i]=directory+'/'+filename+'/DICOM-raw'\n temporary_path=os.listdir(site_dicom[i])\n\n for another_file in temporary_path:\n site_dicom_sub[j]=site_dicom[i]+'/'+another_file+'/scans'\n temporary_path_1 = os.listdir(site_dicom_sub[j])\n for another_file_1 in temporary_path_1:\n site_sub_files[k]=site_dicom_sub[j]+'/'+another_file_1+'/'\n k=k+1\n j = j + 1\n i=i+1\n splitted={}\n output_mif={}\n for i in range (len(site_sub_files)):\n splitted[i]=site_sub_files[i].split('/')\n output_mif[i]=directory+'/'+splitted[i][5]+'/MIF-raw/'+splitted[i][5]+'_'+splitted[i][7]+'_'+splitted[i][9]+'.mif'\n\n\n # save (or return) dataframe here?\n return site_sub_files,output_mif", "def run_record(self, state, pipeline, record_path):\n e1 = cv2.getTickCount()\n while state.record_btn:\n # Wait for a coherent pair of frames: depth and color\n frames = pipeline.wait_for_frames()\n depth_frame = frames.get_depth_frame()\n color_frame = frames.get_color_frame()\n if not depth_frame or not color_frame:\n continue\n \n # filter\n depth_frame = self.th_filter.process(depth_frame)\n depth_frame = self.sp_filter.process(depth_frame)\n depth_frame = 
self.tmp_filter.process(depth_frame)\n\n depth_colormap = self.colorizer.colorize(depth_frame)\n # Convert images to numpy arrays\n depth_image = np.asanyarray(depth_colormap.get_data())\n color_image = np.asanyarray(color_frame.get_data())\n\n lane_masked = self.lane_detector.detect(color_image)\n # Show images\n stacked_imgs = (color_image, depth_image, lane_masked)\n images = np.hstack(stacked_imgs)\n cv2.resizeWindow(state.WIN_NAME, \n self.width*len(stacked_imgs), \n self.height)\n cv2.imshow(state.WIN_NAME, images)\n cv2.setMouseCallback(state.WIN_NAME, state.mouse_controll)\n key = cv2.waitKey(1)\n if key == 27:\n state.app_btn = False\n state.record_btn = False\n break\n # Calculate Runtime Tick to quit\n e2 = cv2.getTickCount()\n tick = int((e2 - e1) / cv2.getTickFrequency())\n # Save images per tick\n if self.saveimg:\n color_file = record_path / f\"color-{tick}.npy\"\n depth_file = record_path / f\"depth-{tick}.npy\"\n ps_file = record_path / f\"ps-{tick}.ply\"\n if not ps_file.exists():\n np.save(color_file, color_image)\n np.save(depth_file, depth_image)\n \n # Create point cloud\n if self.savepc and (not ps_file.exists()):\n points = self.pc.calculate(depth_frame)\n self.pc.map_to(depth_frame)\n points.export_to_ply(str(ps_file), color_frame)\n\n if tick > self.record_time:\n print(\"Finish Record\")\n state.app_btn = False\n state.record_btn = False\n cv2.destroyAllWindows()\n break\n\n if not state.app_btn:\n break", "def _nextAnimFrame(step=0):\n lfp_frame.set_data(timestamps[step:step+frame_size], lfp[step:step+frame_size])\n r_raw_frame.set_data(timestamps[step:step+frame_size], raw_ripple[step:step+frame_size])\n r_pow_frame.set_data(timestamps[step:step+frame_size], ripple_power[step:step+frame_size])\n lfp_measure.set_text(txt_template % timestamps[step])\n # Updating the limits is needed still so that the correct range of data\n # is displayed! It doesn't update the axis labels though - That's a\n # different ballgame!\n plot_axes.set_xlim(timestamps[step], timestamps[step+frame_size])\n return lfp_frame, r_raw_frame, r_pow_frame, lfp_measure", "def genFrame(self):\n # generate frame-specific data\n frameData = self._genFrameData()\n\n # call parent function to create the complete frame (as bytearray)\n frame = self._genDigiMeshFrame(frameData)\n\n # OBS: never escape-sequence local msg\n return frame", "def frame(self):" ]
[ "0.64260715", "0.6129523", "0.6008771", "0.6003277", "0.5954754", "0.59039664", "0.5864619", "0.5844462", "0.5808007", "0.57920486", "0.57749957", "0.57749957", "0.57749957", "0.57749957", "0.57749957", "0.57749957", "0.5700318", "0.56840724", "0.5669105", "0.566348", "0.566348", "0.566348", "0.566348", "0.566348", "0.56485975", "0.56468177", "0.5633148", "0.5623043", "0.5615939", "0.560757" ]
0.7119006
0
Main pipeline of tracking-by-detection. From one image, we can obtain a list of detected objects along with their bounding boxes, labels, and depth. Objects are tracked with the data association solver and a list of trackers.
def main_pipeline(self, image): # detection t0 = datetime.now() bbox_list, score_list, label_list = self.det.inference(image) t1 = datetime.now() logging.info('main pipeline (det): {}'.format(get_tdiff(t0, t1))) # estimation t0 = datetime.now() disp = self.est.inference(image) depth_list = self.est.calc_depth(bbox_list) t1 = datetime.now() logging.info('main pipeline (est): {}'.format(get_tdiff(t0, t1))) # tracker predict t0 = datetime.now() for t in self.t_list: t.predict() t1 = datetime.now() logging.info('main pipeline (trk_pred): {}'.format(get_tdiff(t0, t1))) # associate t0 = datetime.now() matched_pair, unmatched_bbox_list, _ = associate(bbox_list, label_list, self.t_list) t1 = datetime.now() logging.info('main pipeline (da_solver): {}'.format(get_tdiff(t0, t1))) t0 = datetime.now() # update trackers for matched_pair for m in matched_pair: t = self.t_list[m[1]] bbox = bbox_list[m[0]] depth = depth_list[m[0]] est_dict = { 'label': label_list[m[0]], 'score': score_list[m[0]]} t.update(self.frame_idx, bbox, depth, est_dict) # update in-track status of all trackers for t in self.t_list: t.update_status(self.frame_idx) # purge out dead trackers self.t_list = [t for t in self.t_list if t.get_status()] # create new trackers for unmatched_bbox_list for b_idx in unmatched_bbox_list: bbox = bbox_list[b_idx] depth = depth_list[b_idx] est_dict = { 'label': label_list[b_idx], 'score': score_list[b_idx]} self.t_list.append(tracker(self.t_cfg, self.tid_new, bbox, depth, est_dict)) self.tid_new += 1 t1 = datetime.now() logging.info('main pipeline (trk_upd): {}'.format(get_tdiff(t0, t1))) # disparity map for display return disp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detect_objects(interpreter, image):\n set_input_tensor(interpreter, image)\n interpreter.invoke()\n\n # Get all output details\n #boxes = get_output_tensor(interpreter, 0)\n classes = get_output_tensor(interpreter, 1)\n scores = get_output_tensor(interpreter, 2)\n #count = int(get_output_tensor(interpreter, 3))\n\n #results = []\n #for i in range(count):\n # if scores[i] >= threshold:\n # result = {\n # #'bounding_box': boxes[i],\n # 'class_id': classes[i],\n # 'score': scores[i]\n # }\n # results.append(result)\n \n \n #print(\"detection results:\\n\" + str(results))\n #return results\n return np.array([int(_class) for _class in classes]), np.array(scores)", "def detect_objects(self, image):\n # Feed the input image to the model\n self.set_input_tensor(image)\n self.model.invoke()\n\n # Get all outputs from the model\n boxes = self.get_output_tensor(0)\n classes = self.get_output_tensor(1)\n scores = self.get_output_tensor(2)\n count = int(self.get_output_tensor(3))\n\n results = []\n for i in range(count):\n result = {\n 'bounding_box': boxes[i],\n 'class_id': int(classes[i]),\n 'score': scores[i]\n }\n results.append(result)\n return results", "def extract_face_detections(self):\n self.detector.setInput(self.image_blob)\n self.detections = self.detector.forward()", "def __detect_objs(self):\n while True:\n # Wait for input images\n if (not self.__predict_start) or \\\n (self.__img is None):\n continue\n\n # Client for detection\n client = vision.ImageAnnotatorClient()\n\n # Encode image to binary\n _, img_buffer = cv2.imencode(\".jpg\", self.__img)\n img_bytes = img_buffer.tobytes()\n\n # Change to vision Image type\n image = vision.Image(content=img_bytes)\n # Detect Person\n self.__detect_info = client.object_localization(image=image,\n max_results=self.__max_results\n ).localized_object_annotations\n cv2.waitKey(30)", "def detect_objects(self, image):\n preprocessed_image = self._preprocess_image(image)\n\n # Feed the input image to the model\n self._set_input_tensor(preprocessed_image)\n self._interpreter.invoke()\n\n # Get all outputs from the model\n boxes = self._get_output_tensor(0)\n classes = self._get_output_tensor(1)\n scores = self._get_output_tensor(2)\n count = int(self._get_output_tensor(3))\n\n results = []\n for i in range(count):\n if scores[i] >= self.THRESHOLD:\n result = {\n 'bounding_box': boxes[i],\n 'class_id': classes[i],\n 'score': scores[i]\n }\n results.append(result)\n return results", "def __image_pipeline(self, img):\n \n ##### Lane finding pipeline #######\n resized = self.__resize_image(img)\n undistorted = self.__correct_distortion(resized)\n warped = self.__warp_image_to_biv(undistorted)\n thresholded = self.__threshold_image(warped)\n lines = self.__get_lane_lines(thresholded)\n lane_img = self.__draw_lane_lines(undistorted, thresholded, include_stats=True)\n\n\n ##### Vehicle Tracking pipeline #####\n\n hot_windows = self.windFinder.get_hot_windows(img)\n car_boxes, wrap_img = self.vTracker.image_pipeline(img, hot_windows,\n return_img=False) \n # img = cv2.addWeighted(img, 1, wrap_img, 0.5, 0)\n result = self.__draw_boxes(lane_img, car_boxes)\n\n return result", "def object_detector(detector, img_location: str, num_detection=5 ) -> list:\n img = PIL.Image.open(img_location)\n img = np.array(img)\n img = tf.expand_dims(img, axis=0)\n result = detector(img)\n\n ret = []\n\n for i in range(num_detection):\n detection_class_number = int(result['detection_classes'].numpy()[0][i])\n detection_class_name = CLASSES_90[detection_class_number]\n\n 
detection_score = result['detection_scores'].numpy()[0][i]\n rounded_detection_score = round(float(detection_score), 2)\n\n # Append as a tuple\n ret.append( (detection_class_name, rounded_detection_score) )\n\n return ret", "def main():\n with open(IMAGEPATH_LIST_PATH, \"rt\") as imagepath_list_handle:\n imagepath_list = [line.strip() for line in imagepath_list_handle.readlines()]\n\n object_detector = ObjectDetector(MODEL_PATH)\n\n dataset_json = []\n for imagepath in imagepath_list:\n image = scipy.misc.imread(imagepath)\n detections = object_detector.run(image)\n\n detections_json = {\"path\": imagepath, \"detections\": [det.to_dict() for det in detections]}\n dataset_json.append(detections_json)\n\n with open(DATASET_PATH, \"wt\") as json_handle:\n json.dump(dataset_json, json_handle, sort_keys=True, indent=4)", "def get_object_detections(self):\n detections = self.__get_cropped_detections(self.image)\n return detections", "def process_detections(tracker, detections, nms_max_overlap, frame):\r\n #initialize color map\r\n cmap = plt.get_cmap('tab20b')\r\n colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]\r\n\r\n # run non-maxima supression\r\n boxs = np.array([d.tlwh for d in detections])\r\n scores = np.array([d.confidence for d in detections])\r\n classes = np.array([d.class_name for d in detections])\r\n indices = preprocessing.non_max_suppression(boxs, classes, nms_max_overlap, scores)\r\n detections = [detections[i] for i in indices] \r\n\r\n # Call the tracker\r\n tracker.predict()\r\n tracker.update(detections)\r\n\r\n # update tracks\r\n for track in tracker.tracks:\r\n if not track.is_confirmed() or track.time_since_update > 1:\r\n continue \r\n bbox = track.to_tlbr()\r\n class_name = track.get_class()\r\n \r\n # draw bbox on screen\r\n color = colors[int(track.track_id) % len(colors)]\r\n color = [i * 255 for i in color]\r\n cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 1)\r\n cv2.rectangle(frame, (int(bbox[0]), int(bbox[1]-30)), \r\n (int(bbox[0])+(len(class_name)+len(str(track.track_id)))*17, int(bbox[1])), color, -1)\r\n cv2.putText(frame, class_name + \"-\" + str(track.track_id),(int(bbox[0]), \r\n int(bbox[1]-10)),0, 0.5, (255,255,255), 1)\r\n\r\n # if enable info flag then print details about each track\r\n if FLAGS.info:\r\n print(\"Tracker ID: {}, Class: {}, BBox Coords (xmin, ymin, xmax, ymax): {}\".format(str(track.track_id), \r\n class_name, (int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))))\r\n return frame", "def detect_objects(interpreter, image):\n set_input_tensor(interpreter, image)\n interpreter.invoke()\n\n # Get all output details\n boxes = get_output_tensor(interpreter, 0)\n classes = get_output_tensor(interpreter, 1)\n scores = get_output_tensor(interpreter, 2)\n count = int(get_output_tensor(interpreter, 3))\n\n results = []\n for i in range(count):\n # Only check for people that meet the threshold\n if classes[i] == 0.0 and scores[i] >= THRESHOLD:\n result = {\n \"bounding_box\": boxes[i],\n \"class_id\": classes[i],\n \"score\": scores[i],\n }\n results.append(result)\n return results", "def visualize_detection(self, image):\n\t\tH, W, _ = image.shape\n\t\tpos_list = self.apply_detection(image)\n\t\tdetections = {}\n\t\thasDetection = False\n\t\tfor i, L in enumerate(pos_list):\n\t\t\ttext, coordinates = L[0], L[1]\n\t\t\tCOLOR = COLORS[text]\n\t\t\tfor x, y, w, h in coordinates:\n\t\t\t\t# prune bad homography points\n\t\t\t\tif x < 0 or y < 0 or x + w > W or \\\n\t\t\t\t y + h > H or w <= 1 or h 
<= 1:\n\t\t\t\t\tcontinue\n\t\t\t\t# add the detection to the dict for tracking\n\t\t\t\tdetections[self.num_detect] = (x, y, w, h)\n\t\t\t\tself.detection_index[self.num_detect] = (x, y, w, h, self.num_save, text)\n\t\t\t\tself.num_detect += 1\n\t\t\t\thasDetection = True\n\t\t\t\t# if the detection is human\n\t\t\t\tif text == 'face':\n\t\t\t\t\tgender = self.genderDetect.classify(image[y:y+h, x:x+w, :])\n\t\t\t\t\tgender = 'female' if gender[0] < 0.5 else 'male'\n\t\t\t\t\tcv2.putText(image, gender, (x + w // 2 -10, y + h + 15),\n\t\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.6, COLOR, 2, cv2.LINE_AA)\n\n\t\t\t\timage = cv2.rectangle(image, (x, y), (x + w, y + h), COLOR, 2)\n\t\t\t\tcv2.putText(image, text, (x, y - 5),\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.6, COLOR, 2, cv2.LINE_AA)\n\t\tif hasDetection:\n\t\t\tself.detection_frames[self.num_save] = detections\n\t\tself.num_save +=1\n\t\treturn image", "def track_faces_in_video(self):\r\n\r\n logger.debug('Executing face tracking')\r\n\r\n track_loaded = False\r\n\r\n # Try to load YAML file with tracking results\r\n if os.path.exists(self.track_file_path):\r\n\r\n print 'Loading YAML file with tracking results'\r\n logger.debug('Loading YAML file with tracking results')\r\n\r\n track_faces = utils.load_YAML_file(self.track_file_path)\r\n\r\n if track_faces:\r\n self.tracked_faces = track_faces\r\n\r\n print 'YAML file with tracking results loaded'\r\n logger.debug('YAML file with tracking results loaded')\r\n\r\n track_loaded = True\r\n\r\n if not track_loaded:\r\n\r\n # Check existence of detection results\r\n\r\n if len(self.detected_faces) == 0:\r\n\r\n # Try to load YAML file\r\n if os.path.exists(self.det_file_path):\r\n\r\n print 'Loading YAML file with detection results'\r\n logger.debug('Loading YAML file with detection results')\r\n\r\n with open(self.det_file_path) as f:\r\n\r\n self.detected_faces = yaml.load(f)\r\n\r\n print 'YAML file with detection results loaded'\r\n logger.debug('YAML file with detection results loaded')\r\n\r\n else:\r\n\r\n print 'Warning! 
No detection results found!'\r\n logger.warning('No detection results found!')\r\n\r\n return\r\n\r\n # Get shot cuts\r\n self.calc_hist_diff()\r\n\r\n print '\\n\\n### Face tracking ###\\n'\r\n logger.debug('\\n\\n### Face tracking ###\\n')\r\n\r\n # Save processing time\r\n start_time = cv2.getTickCount()\r\n\r\n self.tracked_faces = []\r\n\r\n self.disc_tracked_faces = []\r\n\r\n # Counter for frames with detected faces\r\n frame_counter = 0\r\n\r\n # If a reduced frame rate is used, frames are less\r\n use_or_fps = c.USE_ORIGINAL_FPS\r\n used_fps = c.USED_FPS\r\n min_segment_duration = c.MIN_SEGMENT_DURATION\r\n tracking_min_int_area = c.TRACKING_MIN_INT_AREA\r\n min_size_width = c.FACE_DETECTION_MIN_SIZE_WIDTH\r\n min_size_height = c.FACE_DETECTION_MIN_SIZE_HEIGHT\r\n max_fr_with_miss_det = c.MAX_FR_WITH_MISSED_DET\r\n use_aligned_face = c.USE_ALIGNED_FACE_IN_TRACKING\r\n\r\n if self.params is not None:\r\n if c.USE_ORIGINAL_FPS_KEY in self.params:\r\n use_or_fps = self.params[c.USE_ORIGINAL_FPS_KEY]\r\n if c.USED_FPS_KEY in self.params:\r\n used_fps = self.params[c.USED_FPS_KEY]\r\n if c.MIN_SEGMENT_DURATION_KEY in self.params:\r\n min_segment_duration = self.params[\r\n c.MIN_SEGMENT_DURATION_KEY]\r\n if c.TRACKING_MIN_INT_AREA_KEY in self.params:\r\n tracking_min_int_area = self.params[\r\n c.TRACKING_MIN_INT_AREA_KEY]\r\n if c.MIN_SIZE_WIDTH_KEY in self.params:\r\n min_size_width = self.params[c.MIN_SIZE_WIDTH_KEY]\r\n if c.MIN_SIZE_HEIGHT_KEY in self.params:\r\n min_size_height = self.params[c.MIN_SIZE_HEIGHT_KEY]\r\n if c.MAX_FR_WITH_MISSED_DET_KEY in self.params:\r\n max_fr_with_miss_det = self.params[\r\n c.MAX_FR_WITH_MISSED_DET_KEY]\r\n if c.USE_ALIGNED_FACE_IN_TRACKING_KEY in self.params:\r\n use_aligned_face = self.params[\r\n c.USE_ALIGNED_FACE_IN_TRACKING_KEY]\r\n\r\n # Minimum duration of a segment in frames\r\n min_segment_frames = int(\r\n math.ceil(self.fps * min_segment_duration))\r\n\r\n if not use_or_fps:\r\n min_segment_frames = int(\r\n math.ceil((used_fps + 1) * min_segment_duration))\r\n\r\n # Make copy of detected faces\r\n detection_list = list(self.detected_faces)\r\n\r\n # Iterate through frames in detected_faces\r\n for detection_dict in detection_list:\r\n\r\n self.progress = 100 * (frame_counter / self.saved_frames)\r\n\r\n print('progress: ' + str(self.progress) + ' % \\r'),\r\n\r\n elapsed_s = detection_dict[c.ELAPSED_VIDEO_TIME_KEY]\r\n\r\n frame_name = detection_dict[c.SAVED_FRAME_NAME_KEY]\r\n\r\n faces = detection_dict[c.FACES_KEY]\r\n\r\n face_counter = 0\r\n\r\n # Iterate though faces in frame\r\n for face_dict in faces:\r\n\r\n track_window = face_dict[c.BBOX_KEY]\r\n\r\n left_eye_pos = face_dict[c.LEFT_EYE_POS_KEY]\r\n\r\n right_eye_pos = face_dict[c.RIGHT_EYE_POS_KEY]\r\n\r\n nose_pos = face_dict[c.NOSE_POSITION_KEY]\r\n\r\n file_name = face_dict[c.ALIGNED_FACE_FILE_NAME_KEY]\r\n\r\n # Counter for faces in segment\r\n segment_face_counter = 1\r\n\r\n segment_frame_list = []\r\n\r\n # Start new segment\r\n segment_frame_dict = {c.FRAME_COUNTER_KEY: frame_counter,\r\n c.ELAPSED_VIDEO_TIME_KEY: elapsed_s,\r\n c.DETECTION_BBOX_KEY: track_window,\r\n c.TRACKING_BBOX_KEY: track_window,\r\n c.LEFT_EYE_POS_KEY: left_eye_pos,\r\n c.RIGHT_EYE_POS_KEY: right_eye_pos,\r\n c.NOSE_POSITION_KEY: nose_pos,\r\n c.ALIGNED_FACE_FILE_NAME_KEY: file_name,\r\n c.DETECTED_KEY: True,\r\n c.SAVED_FRAME_NAME_KEY: frame_name}\r\n\r\n segment_frame_list.append(segment_frame_dict)\r\n\r\n aligned_file_path = None\r\n rgb_roi = None\r\n if use_aligned_face:\r\n # Use the 
aligned face as the\r\n # Region of Interest for tracking\r\n complete_file_name = file_name + '.png'\r\n aligned_file_path = os.path.join(\r\n self.align_path, complete_file_name)\r\n\r\n rgb_roi = cv2.imread(\r\n aligned_file_path, cv2.IMREAD_COLOR)\r\n\r\n else:\r\n # Use detected face as the\r\n # Region of Interest for tracking\r\n x0 = track_window[0]\r\n y0 = track_window[1]\r\n w = track_window[2]\r\n h = track_window[3]\r\n x1 = x0 + w\r\n y1 = y0 + h\r\n\r\n frame_path = os.path.join(\r\n self.frames_path, frame_name)\r\n\r\n # Whole frame\r\n rgb = cv2.imread(frame_path, cv2.IMREAD_COLOR)\r\n\r\n # Face\r\n rgb_roi = rgb[y0:y1, x0:x1]\r\n\r\n if rgb_roi is None:\r\n print('Warning! Face to be tracked is None')\r\n\r\n if use_aligned_face:\r\n logger.warning(\r\n 'Face ' + aligned_file_path + ' is None')\r\n else:\r\n logger.warning(\r\n 'Face from frame ' + frame_name + ' is None')\r\n\r\n face_counter += 1\r\n\r\n continue\r\n\r\n # Convert image to hsv\r\n hsv_roi = cv2.cvtColor(rgb_roi, cv2.COLOR_BGR2HSV)\r\n\r\n mask_roi = cv2.inRange(\r\n hsv_roi, np.array((0., 60., 32.)),\r\n np.array((180., 255., 255.)))\r\n\r\n hist = cv2.calcHist(\r\n [hsv_roi], [0], mask_roi, [16], [0, 180])\r\n\r\n cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)\r\n hist = hist.reshape(-1)\r\n\r\n # Face should not be considered anymore\r\n del (detection_list[frame_counter]\r\n [c.FACES_KEY][face_counter])\r\n\r\n sub_frame_counter = frame_counter + 1\r\n\r\n missed_det_counter = 0\r\n\r\n # Iterate through subsequent frames\r\n for sub_det_dict in detection_list[sub_frame_counter:]:\r\n\r\n # Check if a new shot begins\r\n if sub_frame_counter in self.cut_idxs:\r\n break\r\n\r\n sub_frame_name = sub_det_dict[c.SAVED_FRAME_NAME_KEY]\r\n\r\n sub_frame_path = os.path.join(\r\n self.frames_path, sub_frame_name)\r\n\r\n # Read image from given path\r\n sub_image = cv2.imread(\r\n sub_frame_path, cv2.IMREAD_COLOR)\r\n\r\n if sub_image is None:\r\n print('Warning! 
Image is None')\r\n logger.warning(\r\n 'Image ' + sub_frame_path + ' is None')\r\n\r\n continue\r\n\r\n # Convert image to hsv\r\n sub_hsv = cv2.cvtColor(sub_image, cv2.COLOR_BGR2HSV)\r\n\r\n sub_mask = cv2.inRange(sub_hsv,\r\n np.array((0., 60., 32.)),\r\n np.array((180., 255., 255.)))\r\n\r\n # Apply meanshift to get the new location\r\n prob = cv2.calcBackProject(\r\n [sub_hsv], [0], hist, [0, 180], 1)\r\n prob &= sub_mask\r\n term_crit = (cv2.TERM_CRITERIA_EPS\r\n | cv2.TERM_CRITERIA_COUNT, 10, 1)\r\n\r\n track_box, track_window = cv2.CamShift(\r\n prob, track_window, term_crit)\r\n\r\n track_x0 = track_window[0]\r\n track_y0 = track_window[1]\r\n track_w = track_window[2]\r\n track_h = track_window[3]\r\n\r\n # Check size of track window\r\n if ((track_w <= min_size_width)\r\n or (track_h <= min_size_height)):\r\n\r\n break\r\n\r\n segment_frame_dict = {}\r\n\r\n track_list = (\r\n int(track_x0), int(track_y0), int(track_w),\r\n int(track_h))\r\n\r\n segment_frame_dict[c.TRACKING_BBOX_KEY] = track_list\r\n\r\n sub_faces = sub_det_dict[c.FACES_KEY]\r\n\r\n sub_face_counter = 0\r\n\r\n sim = False\r\n\r\n det_bbox = None\r\n\r\n for sub_face_dict in sub_faces:\r\n\r\n det_bbox = sub_face_dict[c.BBOX_KEY]\r\n\r\n # If track window corresponds to\r\n # a detected face,\r\n # delete detection from list\r\n\r\n (sim, int_area, int_area_pct) = utils.is_rect_similar(\r\n track_window, det_bbox, tracking_min_int_area)\r\n\r\n if sim:\r\n # det_face_counter = det_face_counter + 1\r\n\r\n track_window = det_bbox\r\n\r\n break\r\n\r\n sub_face_counter += 1\r\n\r\n t_x0 = track_window[0]\r\n t_y0 = track_window[1]\r\n t_w = track_window[2]\r\n t_h = track_window[3]\r\n\r\n segment_frame_dict[c.DETECTION_BBOX_KEY] = det_bbox\r\n\r\n # If a detected face corresponds to track window\r\n # delete detected face from detection list\r\n\r\n if sim:\r\n\r\n missed_det_counter = 0\r\n\r\n segment_frame_dict[c.DETECTED_KEY] = True\r\n\r\n segment_frame_dict[c.LEFT_EYE_POS_KEY] = (\r\n sub_face_dict[c.LEFT_EYE_POS_KEY])\r\n segment_frame_dict[c.RIGHT_EYE_POS_KEY] = (\r\n sub_face_dict[c.RIGHT_EYE_POS_KEY])\r\n\r\n segment_frame_dict[c.NOSE_POSITION_KEY] = (\r\n sub_face_dict[c.NOSE_POSITION_KEY])\r\n\r\n segment_frame_dict[c.ALIGNED_FACE_FILE_NAME_KEY] = (\r\n sub_face_dict[c.ALIGNED_FACE_FILE_NAME_KEY])\r\n\r\n del (detection_list[sub_frame_counter]\r\n [c.FACES_KEY][sub_face_counter])\r\n\r\n else:\r\n\r\n # Check if distance from last detection\r\n # is too big\r\n missed_det_counter += 1\r\n\r\n if missed_det_counter > max_fr_with_miss_det:\r\n\r\n # Remove last frames and\r\n # interrupt tracking\r\n for i in range(0, max_fr_with_miss_det):\r\n segment_frame_list.pop()\r\n\r\n segment_face_counter = (\r\n segment_face_counter - max_fr_with_miss_det)\r\n\r\n break\r\n\r\n segment_frame_dict[c.DETECTED_KEY] = False\r\n\r\n elapsed_ms = sub_det_dict[c.ELAPSED_VIDEO_TIME_KEY]\r\n\r\n # Update list of frames for segment\r\n segment_frame_dict[\r\n c.FRAME_COUNTER_KEY] = sub_frame_counter\r\n segment_frame_dict[\r\n c.ELAPSED_VIDEO_TIME_KEY] = elapsed_ms\r\n\r\n track_list = (\r\n int(t_x0), int(t_y0), int(t_w), int(t_h))\r\n\r\n segment_frame_dict[c.TRACKING_BBOX_KEY] = track_list\r\n segment_frame_dict[\r\n c.SAVED_FRAME_NAME_KEY] = sub_frame_name\r\n\r\n segment_frame_list.append(segment_frame_dict)\r\n\r\n del sub_image\r\n\r\n sub_frame_counter += 1\r\n\r\n segment_face_counter += 1\r\n\r\n # Segment must be considered only if its number\r\n # of frames is greater or equals than a minimum\r\n if 
segment_face_counter >= min_segment_frames:\r\n\r\n segments = self.divide_segment_by_face(\r\n segment_frame_list)\r\n\r\n if len(segments) > 0:\r\n self.tracked_faces.extend(segments)\r\n\r\n else:\r\n\r\n segment_dict = {c.FRAMES_KEY: segment_frame_list}\r\n\r\n self.disc_tracked_faces.append(segment_dict)\r\n\r\n # Check histograms of detected faces and\r\n # divide segment accordingly\r\n\r\n face_counter += 1\r\n\r\n frame_counter += 1\r\n\r\n # Create directory for this video\r\n\r\n if not (os.path.exists(self.track_path)):\r\n os.makedirs(self.track_path)\r\n\r\n # Save tracking result in YAML file\r\n utils.save_YAML_file(self.track_file_path, self.tracked_faces)\r\n\r\n # Save processing time\r\n time_in_clocks = cv2.getTickCount() - start_time\r\n time_in_seconds = time_in_clocks / cv2.getTickFrequency()\r\n\r\n print 'Time for face tracking:', time_in_seconds, 's\\n'\r\n logger.debug('Time for face tracking:', time_in_seconds, 's\\n')\r\n\r\n self.anal_times[c.FACE_TRACKING_TIME_KEY] = time_in_seconds\r\n\r\n utils.save_YAML_file(self.analysis_file_path, self.anal_times)", "def process_image(self):\n\n detect.main(self.nn_args)", "def get_detections(self):\n frame = self.get_still()\n return detector.process_frame(frame, False)", "def extract_detections(self):\n self.rescue_model.setInput(self.human_blob)\n self.predictions = self.rescue_model.forward()", "def detect_objects(self, image, threshold):\n self.set_input_tensor(image)\n self.interpreter.invoke()\n\n # Get all output details\n boxes = self.get_output_tensor(0)\n classes = self.get_output_tensor(1)\n scores = self.get_output_tensor(2)\n count = int(self.get_output_tensor(3))\n\n results = []\n for i in range(count):\n if scores[i] >= threshold:\n result = {\n 'bounding_box': boxes[i],\n 'class_id': classes[i],\n 'score': scores[i]\n }\n results.append(result)\n return results", "def detect_image_objects(gray, detect_params, detect_type=\"all\", label=-1, verbose=False):\n if detect_type == \"all\":\n detected_rects = detect_all_objects(gray, verbose=verbose, **detect_params)\n elif detect_type == \"primary\":\n detected_rects = detect_primary_objects(gray, verbose=verbose, **detect_params)\n else:\n print(f\"Unrecongized input value for detect_type, {detect_type}, so no objects were detected!\")\n print(\"Please provide a string value for detect_type of either 1) 'all' or 2) 'primary'\")\n detected_rects = None\n if isinstance(detected_rects, np.ndarray):\n features_labels = get_detected_features_labels(gray, detected_rects, label=label, verbose=verbose)\n return features_labels", "def get_detections(self, image):\n self.img = jetson.utils.cudaFromNumpy(image)\n self.width = image.shape[1]\n self.height = image.shape[0]\n detections = self._net.Detect(self.img, self.width, self.height)\n print(\"The inference is happening at \" + str(self._net.GetNetworkFPS()) + \" FPS\")\n return detections, jetson.utils.cudaToNumpy(self.img)", "def detect(img, window_list, pipeline):\n #t = Timer()\n windows = []\n for bbox in window_list:\n window = extract_window(img, bbox)\n windows.append(window)\n windows = np.stack(windows)\n detections = pipeline.predict(windows)\n #print(\"Time to detect: {:.2f}\".format(t.tock()))\n return detections", "def detector(videoframe, facedetection, maskdetection):\n (h, w) = videoframe.shape[:2]\n blobimage = cv2.dnn.blobFromImage(videoframe, 1.0, (224, 224), (104.0, 177.0, 123.0))\n\n facedetection.setInput(blobimage)\n ffinding = facedetection.forward()\n\n face_list = []\n locations = []\n 
predictions = []\n\n for i in range(0, ffinding.shape[2]):\n credence = ffinding[0, 0, i, 2]\n if credence > 0.6:\n case = ffinding[0, 0, i, 3:7] * np.array([w, h, w, h])\n (x_start, y_start, x_end, y_end) = case.astype(\"int\")\n (x_start, y_start) = (max(0, x_start), max(0, y_start))\n (x_end, y_end) = (min(w - 1, x_end), min(h - 1, y_end))\n\n image = videoframe[y_start:y_end, x_start:x_end]\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, (224, 224))\n image = img_to_array(image)\n image = preprocess_input(image)\n face_list.append(image)\n locations.append((x_start, y_start, x_end, y_end))\n\n if len(face_list) > 0:\n face_list = np.array(face_list, dtype=\"float32\")\n predictions = maskdetection.predict(face_list, batch_size=32)\n return (locations, predictions)", "def load_detection(self):\n\n root_dir = self.kitti_dataset_path + 'object/'\n kitti_det_loader = kitti_detection_helper.KittiDataset(root_dir,\n self.kitti_date, self.kitti_drive)\n\n type_list = ['Car', 'Van', 'Truck']\n\n # some of the bbox are the same one\n # need to compute average bbox\n for id, object_3d_list in enumerate(kitti_det_loader.all_object_3d):\n for object_3d in object_3d_list:\n\n corner_sublist = []\n\n if object_3d.cls_type not in type_list:\n continue\n\n trackletBox, oTq, yaw = object_3d.generate_corners3d()\n FN = kitti_det_loader.img_idx_list[id]\n\n # only load bbox between start and end frame\n if FN >= self.end_index:\n # print(\"FN {} end {}\".format(FN, self.end_index))\n continue\n\n wTi = np.eye(4)\n\n wRi = self.gt_orientation[FN]\n # note q is from G to I\n wTi[:3, :3] = wRi\n wTi[:3, 3] = np.squeeze(self.gt_position[FN])\n\n wTq = wTi @ self.iTo @ oTq\n\n # force only yaw and x,y translation\n wTq = utils.poseSE32SE2(wTq)\n\n cornerPosInVelo = wTq[:3, :3] @ trackletBox + np.tile(wTq[:3, 3], (8, 1)).T\n corner_sublist.append(cornerPosInVelo)\n\n cornerPosInCam2 = oTq[:3, :3] @ trackletBox + np.tile(oTq[:3, 3], (8, 1)).T\n cornerPosInCam2 = np.eye(3) @ cornerPosInCam2[:3, :]\n\n # used for per frame IOU evaluation\n if FN not in self.local_cuboid_dict.keys():\n self.local_cuboid_dict[FN] = [cornerPosInCam2.T]\n self.local_volume_dict[FN] = [object_3d.h * object_3d.w * object_3d.l]\n self.local_yaw_dict[FN] = [yaw]\n self.local_hwl_dict[FN] = [[object_3d.h, object_3d.w, object_3d.l]]\n else:\n self.local_cuboid_dict[FN].append(cornerPosInCam2.T)\n self.local_volume_dict[FN].append(object_3d.h * object_3d.w * object_3d.l)\n self.local_yaw_dict[FN].append(yaw)\n self.local_hwl_dict[FN].append([object_3d.h, object_3d.w, object_3d.l])\n\n if len(corner_sublist) > 0:\n\n # for plotting\n corner_sublist = np.concatenate([corner_sublist], axis=0)\n corner_sub = np.mean(corner_sublist, axis=0)\n self.corner_list.append(corner_sub)\n\n # for 3D IOU eval\n # used for global IOU\n self.cuboid_list.append(np.mean(np.array(corner_sublist), axis=0).T)\n self.volume_list.append(object_3d.h * object_3d.w * object_3d.l)\n\n self.cuboid_list = np.array(self.cuboid_list)\n self.volume_list = np.array(self.volume_list)", "def detectObjects(image):\n\tgrayscale = cvCreateImage(cvSize(image.width, image.height), 8, 1)\n\tcvCvtColor(image, grayscale, CV_BGR2GRAY)\n\n\tstorage = cvCreateMemStorage(0)\n\tcvClearMemStorage(storage)\n\tcvEqualizeHist(grayscale, grayscale)\n\tcascade = cvLoadHaarClassifierCascade(\n\t\t'/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml',\n\t\tcvSize(1,1))\n\n\tscalefactor = 1.1 # How much to increase window size each pass\n\tminsize = 50 # 
Smallest face to detect. Up this if you have small falsepositives\n\tfaces = cvHaarDetectObjects(grayscale, cascade, storage, scalefactor, 50,\n\t\t\t\tCV_HAAR_DO_CANNY_PRUNING, cvSize(minsize, minsize))\n\n\treturn [(f.x, f.y, f.x + f.width, f.y + f.height) for f in faces]", "def draw_detections(self, img, yolo_results):\n\n _, height, _ = img.shape\n for yolo_result in yolo_results:\n class_index = yolo_result.class_index\n obj_name = yolo_result.obj_name\n x = yolo_result.x_min\n y = yolo_result.y_min\n w = yolo_result.width\n h = yolo_result.height\n\n offset = class_index * 123457 % self.meta.classes\n\n red = self._get_color(2, offset, self.meta.classes)\n green = self._get_color(1, offset, self.meta.classes)\n blue = self._get_color(0, offset, self.meta.classes)\n box_width = int(height * 0.006)\n cv2.rectangle(img, (int(x), int(y)), (int(x+w)+1, int(y+h)+1), (red, green, blue), box_width)\n cv2.putText(img, obj_name, (int(x) -1, int(y) -1), cv2.FONT_HERSHEY_PLAIN, 2, (red, green, blue), 2)\n\n return img", "def process_image( self, image ):\n \n # 1. detect cars in image at different scales\n \n # Modify x/y start stop according to scale, cars appear smaller near horizon\n scales = config.scales\n \n box_list = []\n for scale_item in scales:\n scale = scale_item[\"scale\"]\n detects_image, boxes = hog_subsample.find_cars(image, \n scale_item[\"y_start_stop\"][0], scale_item[\"y_start_stop\"][1], \n scale, \n config.settings[\"svc\"], \n config.settings[\"scaler\"], \n config.settings[\"orient\"], \n config.settings[\"pix_per_cell\"], config.settings[\"cell_per_block\"], \n config.settings[\"spatial_size\"], config.settings[\"hist_bins\"],\n scale_item[\"x_start_stop\"][0], scale_item[\"x_start_stop\"][1])\n box_list.extend(boxes)\n \n # Update history\n self.bbox_list_history.append( box_list )\n bbox_list_history_list = sum(self.bbox_list_history.copy(), []) # single list of bbox lists in history\n \n # 2. 
heat map and threshold\n \n # Make zeros shaped like image\n heat = np.zeros_like(image[:,:,0]).astype(np.float)\n\n # Add heat for each box in box list history\n heat = heatmap_threshold_detection.add_heat(heat, bbox_list_history_list)\n\n # Apply threshold to help remove false positives\n heat_threshold = config.heatmap_threshold\n heat = heatmap_threshold_detection.apply_threshold(heat, heat_threshold)\n\n # Find final boxes from heatmap using label function\n heatmap = np.clip(heat, 0, 255) # only need to clip if there is more than 255 boxes around a point?\n labels = label(heatmap)\n boxed_image = heatmap_threshold_detection.draw_labeled_bboxes(np.copy(image), labels)\n \n # frame image annotation\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(boxed_image,\"Frame:{}\".format(config.count), (10,100), font, 1, (255,255,255), 2 ,cv2.LINE_AA )\n \n return boxed_image", "def detect_objects(snap):\n client = vision.ImageAnnotatorClient()\n print(snap)\n\n with open(snap, 'rb') as im_file:\n content = im_file.read()\n image = vision.Image(content=content)\n\n objects = client.object_localization(image=image).localized_object_annotations\n\n print(f\"Found {len(objects)} objects\")\n [print(f\"{objet.name} : {round(objet.score*100,2)}\") for objet in objects]\n \n return objects", "def pipeline(image):\n # undistort image\n undistorted_image = undistort_image(image)\n superimposed_image = find_lanes(undistorted_image)\n labels = find_vehicles(undistorted_image)\n\n draw_img = draw_labeled_bboxes(superimposed_image, labels)\n\n \n return draw_img", "def show_tracked_people(self):\r\n\r\n # Check existence of tracking results\r\n\r\n key_frames_path = os.path.join(\r\n self.track_path, c.FACE_RECOGNITION_KEY_FRAMES_DIR)\r\n\r\n if not(os.path.exists(key_frames_path)):\r\n\r\n os.makedirs(key_frames_path)\r\n\r\n if len(self.tracked_faces) == 0:\r\n\r\n # Try to load YAML file\r\n if os.path.exists(self.track_file_path):\r\n\r\n print 'Loading YAML file with tracking results'\r\n\r\n with open(self.track_file_path) as f:\r\n\r\n self.tracked_faces = yaml.load(f)\r\n\r\n print 'YAML file with tracking results loaded'\r\n\r\n else:\r\n\r\n print 'Warning! 
No tracking results found!'\r\n\r\n return\r\n\r\n p_counter = 0\r\n\r\n for segment_dict in self.tracked_faces:\r\n\r\n frame_list = segment_dict[c.FRAMES_KEY]\r\n\r\n # Choose central frame in segment\r\n frames_nr = len(frame_list)\r\n\r\n if frames_nr >= 1:\r\n\r\n middle_idx = int(math.ceil(frames_nr/2.0) - 1)\r\n\r\n middle_frame_dict = frame_list[middle_idx]\r\n\r\n frame_name = middle_frame_dict[c.SAVED_FRAME_NAME_KEY]\r\n\r\n frame_path = os.path.join(self.frames_path, frame_name)\r\n\r\n image = cv2.imread(frame_path, cv2.IMREAD_COLOR)\r\n\r\n # Add tracking window to image as red rectangle\r\n track_bbox = middle_frame_dict[c.TRACKING_BBOX_KEY]\r\n\r\n x0 = track_bbox[0]\r\n x1 = x0 + track_bbox[2]\r\n y0 = track_bbox[1]\r\n y1 = y0 + track_bbox[3]\r\n\r\n cv2.rectangle(\r\n image, (x0, y0), (x1, y1), (0, 0, 255), 3, 8, 0)\r\n\r\n # Save image\r\n fr_name = '%07d.png' % p_counter\r\n\r\n fr_path = os.path.join(key_frames_path, fr_name)\r\n\r\n cv2.imwrite(\r\n fr_path, image, [cv.CV_IMWRITE_PNG_COMPRESSION, 0])\r\n\r\n del image\r\n\r\n p_counter += 1", "def run(self):\n self.track_len = []\n # debug\n while self._segment_index <= self._segment_cnt:\n if self._segment_index < 0: # Uncomment this block to debug specific segment\n self._segment_index += 1\n continue\n # run association\n print \"[Tracking]\\tSegment index:\\t{} Total segment num:\\t{}\".format(self._segment_index, self._segment_cnt)\n start = cv2.getTickCount()\n \n self._run_segment()\n print \"[Tracking]\\tSegment start:\\t{} Segment end\\t{}\".format(self._segment_start_fid,\n self._segment_end_fid)\n # dump into file\n \"\"\"\n seg_name = 'segment_{}.track'.format(self._segment_index)\n seg_file = os.path.join(self._segment_dir, seg_name)\n self._segments_path.append(seg_file)\n Track.dump_to_track_file(self._high_level_tracks, save_name=seg_file)\n print \"Track contains {} high level tracks\".format(len(self._high_level_tracks))\n \"\"\"\n self._segment_index += 1\n end = cv2.getTickCount()\n print \"[Tracking]\\tTime:\\t{} seconds\".format(float(end - start) / cv2.getTickFrequency())\n if P['debug']:\n pos_feature_num = self.pos_arr.shape[0]\n neg_feature_num = self.neg_arr.shape[0]\n pos_arr = np.hstack((self.pos_arr, np.ones(shape=(pos_feature_num, 1))))\n neg_arr = np.hstack((self.neg_arr, np.zeros(shape=(neg_feature_num, 1))))\n np.savetxt(os.path.join(\"../feature_classifier/\", \"{}_pos_feature.txt\".format(self._video_name)), pos_arr)\n np.savetxt(os.path.join(\"../feature_classifier/\", \"{}_neg_feature.txt\".format(self._video_name)), neg_arr)\n\n final_track_save_file = os.path.join(self._save_dir, self._video_name + \"_final_merged.track\")\n mot_track_save_file = os.path.join(self._save_dir, self._video_name + \".txt\")\n Track.dump_to_track_file_no_feature(self._final_tracks, final_track_save_file, self._calib_w, self._calib_h)\n Track.dump_track_with_mot_format(self._final_tracks, mot_track_save_file,)\n print(\"there are {} tracklet in final merged track\".format(len(self._final_tracks)))", "def object_detection(self):\r\n pass" ]
[ "0.6946796", "0.6760437", "0.6635849", "0.66155344", "0.6591393", "0.65542084", "0.6482883", "0.64753896", "0.63018554", "0.63015753", "0.62848926", "0.6262375", "0.61878157", "0.6185116", "0.6184772", "0.6178074", "0.61592215", "0.61547065", "0.61352974", "0.60970944", "0.60804546", "0.60657936", "0.605511", "0.60466117", "0.6031229", "0.60287243", "0.60110193", "0.59807473", "0.595605", "0.593786" ]
0.6867465
1
Trajectory pipeline for tracking-by-detection. Given the previous egomotion transformation matrix and a triplet of images, we can estimate the egomotion of the new frame and accumulate it onto the previous pose to obtain the 3D coordinate transformation matrix. All tracked objects are then projected into the 3D world coordinate frame to generate absolute trajectories. Those trajectories are stored in the dictionary of each tracker.
def traj_pipeline(self, prev_trmat=None):
    # image_seq = [image(frame_idx-2), image(frame_idx-1), image(frame_idx)]
    # egomotion update
    egomo = self.est.get_egomotion(self.image_seq)
    # egomotion transformation
    assert self.frame_idx >= 2, 'invalid self.frame_idx'
    if prev_trmat is None:
        assert self.frame_idx == 2, 'invalid self.frame_idx'
        # initialization of ego transformation matrix
        init_trmat = egomo_vec2mat(self.init_egomo_vec)
        prev_trmat = np.matmul(init_trmat, egomo_vec2mat(egomo[0]))  # frame 0 to 1
    egomo_trmat = np.matmul(prev_trmat, egomo_vec2mat(egomo[1]))
    # tracker list update
    for t in self.t_list:
        # skip lost trackers
        if t.get_status()==False:
            continue
        # bounding box & depth
        bbox, depth = t.get_bbox(), t.get_depth()
        # project to 3d camera coordinate
        p3d_cam = cam_proj(self.k_mat, bbox, depth)
        # transform to world coordinate
        p3d = coord_transform(egomo_trmat, p3d_cam)
        t.add_attr_to_est_dict('traj', p3d)
    return egomo_trmat
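The code above relies on two helpers, egomo_vec2mat and coord_transform, that are not defined in this record. Below is a minimal, hypothetical sketch of what such helpers could look like, assuming the egomotion vector is an XYZ translation plus XYZ Euler rotation (tx, ty, tz, rx, ry, rz) and that poses are 4x4 homogeneous matrices; neither assumption comes from the original snippet.

import numpy as np

def egomo_vec2mat(egomo_vec):
    # Hypothetical helper: convert a 6-DoF egomotion vector (tx, ty, tz, rx, ry, rz)
    # into a 4x4 homogeneous transformation matrix (rotation order Z*Y*X assumed).
    tx, ty, tz, rx, ry, rz = egomo_vec
    cx, sx = np.cos(rx), np.sin(rx)
    cy, sy = np.cos(ry), np.sin(ry)
    cz, sz = np.cos(rz), np.sin(rz)
    Rx = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    Ry = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    Rz = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
    T = np.eye(4)
    T[:3, :3] = Rz @ Ry @ Rx
    T[:3, 3] = [tx, ty, tz]
    return T

def coord_transform(trmat, p3d_cam):
    # Hypothetical helper: map a 3D point from the camera frame into the world
    # frame using the accumulated 4x4 egomotion transform.
    p_h = np.append(np.asarray(p3d_cam, dtype=float).reshape(3), 1.0)
    return (trmat @ p_h)[:3]

Accumulating the per-frame matrices this way (prev_trmat followed by the newest egomotion) is what allows per-frame detections to be expressed as absolute 3D trajectories in the tracker dictionaries.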
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main_pipeline(self, image):\n # detection\n t0 = datetime.now()\n bbox_list, score_list, label_list = self.det.inference(image)\n t1 = datetime.now()\n logging.info('main pipeline (det): {}'.format(get_tdiff(t0, t1)))\n \n # estimation\n t0 = datetime.now()\n disp = self.est.inference(image)\n depth_list = self.est.calc_depth(bbox_list)\n t1 = datetime.now()\n logging.info('main pipeline (est): {}'.format(get_tdiff(t0, t1)))\n \n # tracker predict\n t0 = datetime.now()\n for t in self.t_list:\n t.predict()\n t1 = datetime.now()\n logging.info('main pipeline (trk_pred): {}'.format(get_tdiff(t0, t1)))\n \n # associate\n t0 = datetime.now()\n matched_pair, unmatched_bbox_list, _ = associate(bbox_list, label_list, self.t_list)\n t1 = datetime.now()\n logging.info('main pipeline (da_solver): {}'.format(get_tdiff(t0, t1)))\n \n t0 = datetime.now()\n # update trackers for matched_pair\n for m in matched_pair:\n t = self.t_list[m[1]]\n bbox = bbox_list[m[0]]\n depth = depth_list[m[0]]\n est_dict = {\n 'label': label_list[m[0]],\n 'score': score_list[m[0]]}\n t.update(self.frame_idx, bbox, depth, est_dict)\n \n # update in-track status of all trackers\n for t in self.t_list:\n t.update_status(self.frame_idx)\n \n # purge out dead trackers\n self.t_list = [t for t in self.t_list if t.get_status()]\n\n # create new trackers for unmatched_bbox_list\n for b_idx in unmatched_bbox_list:\n bbox = bbox_list[b_idx]\n depth = depth_list[b_idx]\n est_dict = {\n 'label': label_list[b_idx],\n 'score': score_list[b_idx]}\n self.t_list.append(tracker(self.t_cfg, self.tid_new, bbox, depth, est_dict))\n self.tid_new += 1\n\n t1 = datetime.now()\n logging.info('main pipeline (trk_upd): {}'.format(get_tdiff(t0, t1)))\n\n # disparity map for display\n return disp", "def track_features(self):\r\n img = self.cam0_curr_img_msg.image\r\n grid_height, grid_width = self.get_grid_size(img)\r\n\r\n # Compute a rough relative rotation which takes a vector \r\n # from the previous frame to the current frame.\r\n cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()\r\n\r\n # Organize the features in the previous image.\r\n prev_ids = []\r\n prev_lifetime = []\r\n prev_cam0_points = []\r\n prev_cam1_points = []\r\n\r\n for feature in chain.from_iterable(self.prev_features):\r\n prev_ids.append(feature.id)\r\n prev_lifetime.append(feature.lifetime)\r\n prev_cam0_points.append(feature.cam0_point)\r\n prev_cam1_points.append(feature.cam1_point)\r\n prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)\r\n\r\n # Number of the features before tracking.\r\n self.num_features['before_tracking'] = len(prev_cam0_points)\r\n\r\n # Abort tracking if there is no features in the previous frame.\r\n if len(prev_cam0_points) == 0:\r\n return\r\n\r\n # Track features using LK optical flow method.\r\n curr_cam0_points = self.predict_feature_tracking(\r\n prev_cam0_points, cam0_R_p_c, self.cam0_intrinsics)\r\n\r\n curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(\r\n self.prev_cam0_pyramid, self.curr_cam0_pyramid,\r\n prev_cam0_points.astype(np.float32), \r\n curr_cam0_points.astype(np.float32), \r\n **self.config.lk_params)\r\n \r\n # Mark those tracked points out of the image region as untracked.\r\n for i, point in enumerate(curr_cam0_points):\r\n if not track_inliers[i]:\r\n continue\r\n if (point[0] < 0 or point[0] > img.shape[1]-1 or \r\n point[1] < 0 or point[1] > img.shape[0]-1):\r\n track_inliers[i] = 0\r\n\r\n # Collect the tracked points.\r\n prev_tracked_ids = select(prev_ids, track_inliers)\r\n 
prev_tracked_lifetime = select(prev_lifetime, track_inliers)\r\n prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)\r\n prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)\r\n curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)\r\n\r\n # Number of features left after tracking.\r\n self.num_features['after_tracking'] = len(curr_tracked_cam0_points)\r\n\r\n # Outlier removal involves three steps, which forms a close\r\n # loop between the previous and current frames of cam0 (left)\r\n # and cam1 (right). Assuming the stereo matching between the\r\n # previous cam0 and cam1 images are correct, the three steps are:\r\n #\r\n # prev frames cam0 ----------> cam1\r\n # | |\r\n # |ransac |ransac\r\n # | stereo match |\r\n # curr frames cam0 ----------> cam1\r\n #\r\n # 1) Stereo matching between current images of cam0 and cam1.\r\n # 2) RANSAC between previous and current images of cam0.\r\n # 3) RANSAC between previous and current images of cam1.\r\n #\r\n # For Step 3, tracking between the images is no longer needed.\r\n # The stereo matching results are directly used in the RANSAC.\r\n\r\n # Step 1: stereo matching.\r\n curr_cam1_points, match_inliers = self.stereo_match(\r\n curr_tracked_cam0_points)\r\n\r\n prev_matched_ids = select(prev_tracked_ids, match_inliers)\r\n prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)\r\n prev_matched_cam0_points = select(prev_tracked_cam0_points, match_inliers)\r\n prev_matched_cam1_points = select(prev_tracked_cam1_points, match_inliers)\r\n curr_matched_cam0_points = select(curr_tracked_cam0_points, match_inliers)\r\n curr_matched_cam1_points = select(curr_cam1_points, match_inliers)\r\n\r\n # Number of features left after stereo matching.\r\n self.num_features['after_matching'] = len(curr_matched_cam0_points)\r\n\r\n # Step 2 and 3: RANSAC on temporal image pairs of cam0 and cam1.\r\n # cam0_ransac_inliers = self.two_point_ransac(\r\n # prev_matched_cam0_points, curr_matched_cam0_points,\r\n # cam0_R_p_c, self.cam0_intrinsics, \r\n # self.cam0_distortion_model, self.cam0_distortion_coeffs, \r\n # self.config.ransac_threshold, 0.99)\r\n\r\n # cam1_ransac_inliers = self.two_point_ransac(\r\n # prev_matched_cam1_points, curr_matched_cam1_points,\r\n # cam1_R_p_c, self.cam1_intrinsics, \r\n # self.cam1_distortion_model, self.cam1_distortion_coeffs, \r\n # self.config.ransac_threshold, 0.99)\r\n cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)\r\n cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)\r\n\r\n # Number of features after ransac.\r\n after_ransac = 0\r\n for i in range(len(cam0_ransac_inliers)):\r\n if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):\r\n continue \r\n row = int(curr_matched_cam0_points[i][1] / grid_height)\r\n col = int(curr_matched_cam0_points[i][0] / grid_width)\r\n code = row * self.config.grid_col + col\r\n\r\n grid_new_feature = FeatureMetaData()\r\n grid_new_feature.id = prev_matched_ids[i]\r\n grid_new_feature.lifetime = prev_matched_lifetime[i] + 1\r\n grid_new_feature.cam0_point = curr_matched_cam0_points[i]\r\n grid_new_feature.cam1_point = curr_matched_cam1_points[i]\r\n prev_matched_lifetime[i] += 1\r\n\r\n self.curr_features[code].append(grid_new_feature)\r\n after_ransac += 1\r\n self.num_features['after_ransac'] = after_ransac\r\n\r\n # Compute the tracking rate.\r\n # prev_feature_num = sum([len(x) for x in self.prev_features])\r\n # curr_feature_num = sum([len(x) for x in self.curr_features])\r", "def _tag_pose_callback(self):\n 
for msg in self.pose_msgs:\n\n detections = msg.detections\n if (len(msg.detections)==0):\n continue\n\n exponential_coordinates = []\n translations = []\n for detection in detections:\n self._T_tag2cam = get_T(detection.pose.pose.pose)\n self._marker_num = detection.id\n current_header = detection.pose.header\n inter_pose = self._world_map[self._marker_num, :]\n inter_pose = np.squeeze(inter_pose)\n\n self._T_tag2world = get_tag2world(inter_pose)\n self._T = np.dot(self._T_tag2world, np.linalg.inv(self._T_tag2cam))\n\n T = np.dot(tf.transformations.inverse_matrix(self.Previous_T), self._T)\n angle, direc, point = tf.transformations.rotation_from_matrix(T)\n translation = tf.transformations.translation_from_matrix(T)\n\n exponential_coordinate = direc*angle\n o = tf.transformations.translation_from_matrix(self._T)\n\n if o[2] < 0.697 and o[0] < -0.9 and o[0] > -4 and o[1] < -0.8 and o[1] > -4:\n if self.Previous_time != None:\n time_interval = detection.pose.header.stamp.to_sec() - self.Previous_time\n angular_velocity = angle / time_interval\n translational_velocity = np.linalg.norm(translation) / time_interval\n\n if (np.abs(angular_velocity) < 0.9) and (translational_velocity < 3):\n exponential_coordinates.append(exponential_coordinate)\n translations.append(translation)\n else:\n exponential_coordinates.append(exponential_coordinate)\n translations.append(translation)\n\n if len(exponential_coordinates):\n exponential_coordinates = np.array(exponential_coordinates)\n exponential_coordinates = np.mean(exponential_coordinates, axis=0)\n\n translations = np.array(translations)\n translations = np.mean(translations, axis=0)\n\n angle = np.linalg.norm(exponential_coordinates)\n direc = exponential_coordinate / angle\n\n T = tf.transformations.rotation_matrix(angle, direc)\n T[:3, 3] = translations\n self._T = np.dot(self.Previous_T, T)\n\n q = tf.transformations.quaternion_from_matrix(self._T)\n o = tf.transformations.translation_from_matrix(self._T)\n if q[0] < 0:\n q = -q;\n\n self.poses.append(np.concatenate([q, o]))\n self.pose_times.append(msg.header.stamp.to_sec())\n\n self.Previous_T = self._T\n self.Previous_time = msg.header.stamp.to_sec()\n\n self.poses = np.array(self.poses)\n self.pose_times = np.array(self.pose_times)", "def match_based_on_spatial_temperal_prior_test_2(tracker_record_1, tracker_record_2, pt_obj_1, pt_obj_2, associate_dict, t_interval=30):\n print(\"===== Get in the match_based_on_spatial_temperal_prior_test_2! 
===== \")\n \n # file path\n device_id_1 = 0\n device_id_2 = 1\n img_root_1 = data_path[device_id_1]\n img_root_2 = data_path[device_id_2]\n # save_root =\n \n obj_single_camera_stp_cam_1 = SingleCameraSTP(tracker_record_1, pt_obj_1)\n obj_single_camera_stp_cam_2 = SingleCameraSTP(tracker_record_2, pt_obj_2)\n \n print(obj_single_camera_stp_cam_1.perspective_trace)\n print(obj_single_camera_stp_cam_1.motion_params_4_each)\n obj_multi_cameras_stp_c1c2 = MultiCamerasSTP(\n obj_single_camera_stp_cam_1,\n obj_single_camera_stp_cam_2,\n associate_dict)\n\n # # ===== TEST:coord_transformer_test =====\n # coord_transformer_test(obj_multi_cameras_stp_c1c2)\n # obj_multi_cameras_stp_c1c2.get_start_point_transform()\n \n pt_box_info_1 = obj_multi_cameras_stp_c1c2.obj_single_camera_stp_cam_1.perspective_trace\n pt_box_info_2 = obj_multi_cameras_stp_c1c2.obj_single_camera_stp_cam_2.perspective_trace\n \n # Test on object id '1'\n object_id = '0'\n \n for i in range(np.min([len(pt_box_info_1[object_id]), len(pt_box_info_2[object_id])])):\n f1 = i\n f2 = i\n fname_1 = str(pt_box_info_1[object_id][f1][1])+'.jpg'\n fname_2 = str(pt_box_info_2[object_id][f2][1])+'.jpg'\n \n img_1 = cv2.imread(os.path.join(img_root_1, fname_1))\n img_2 = cv2.imread(os.path.join(img_root_2, fname_2))\n \n cam_1_x = pt_box_info_1[object_id][f1][0][0]\n cam_1_y = pt_box_info_1[object_id][f1][0][1]\n \n cam_2_x = pt_box_info_2[object_id][f2][0][0]\n cam_2_y = pt_box_info_2[object_id][f2][0][1]\n \n t_interval = pt_box_info_2[object_id][f2][1]-pt_box_info_1[object_id][f1][1]\n \n print(cam_1_x, cam_1_y)\n print(cam_2_x, cam_2_y)\n print(t_interval)\n # print(obj_multi_cameras_stp_c1c2.starting_point)\n \n p_map = obj_multi_cameras_stp_c1c2.get_probability_map(cam_1_x, cam_1_y, t_interval, height=210, width=80)\n p_map = cv2.applyColorMap(p_map, cv2.COLORMAP_JET)\n p = obj_multi_cameras_stp_c1c2.get_probability(cam_2_x, cam_2_y, cam_1_x, cam_1_y, t_interval)\n print(p)\n # dist = obj_multi_cameras_stp_c1c2.get_distance(cam_2_x,cam_2_y,cam_1_x,cam_1_y,t_interval)\n p_map = cv2.resize(p_map, (int(pt_obj_2.transformed_width_for_disp), int(pt_obj_2.transformed_height_for_disp)))\n p_map = cv2.flip(p_map, 0) # 0:vertical flip\n pt_color_p_map = pt_obj_2.get_inverse_disp_transform(p_map)\n \n alpha = 0.5\n img_3 = cv2.addWeighted(img_2, alpha, pt_color_p_map, 1-alpha, 0)\n \n img_4 = np.zeros((int(img_2.shape[0]), int(img_2.shape[1]*2), 3), np.uint8)\n img_4[:, :img_1.shape[1], :] = img_1\n img_4[:, img_1.shape[1]:, :] = img_3\n\n # cv2.namedWindow('img_1',cv2.WINDOW_NORMAL)\n # cv2.namedWindow('img_2',cv2.WINDOW_NORMAL)\n cv2.namedWindow('img_4', cv2.WINDOW_NORMAL)\n \n # cv2.imshow('img_1',img_1)\n # cv2.imshow('img_2',img_2)\n cv2.imshow('img_4', img_4)\n \n cv2.imwrite(os.path.join(save_root, fname_1), img_4)\n \n cv2.waitKey()\n return", "def get_tracking(good, kp_pts3d1, kp_pts3d2, theta):\n src_pts = np.float32([ kp_pts3d1[m.queryIdx] for m in good ])\n dst_pts = np.float32([ kp_pts3d2[m.trainIdx] for m in good ])\n \n count = 0\n src = []\n dst = []\n src_1 = []\n dst_1 = []\n sc = []\n for i in range(len(src_pts)):\n src_1.append([src_pts[i,0], src_pts[i,2]])\n dst_1.append([dst_pts[i,0], dst_pts[i,2]])\n if((abs(src_pts[i,1]-dst_pts[i,1])<0.05) and (src_pts[i,2]!=0) and (dst_pts[i,2]!= 0) ):\n count+=1\n src.append([src_pts[i,0], src_pts[i,2]])\n d = [dst_pts[i,0], dst_pts[i,2]]\n dst.append([d[0], d[1]])\n sc.append(1)\n else:\n sc.append(0)\n src = np.asarray(src).reshape(-1,2)\n dst = np.asarray(dst).reshape(-1,2)\n \n 
src_1 = np.asarray(src_1).reshape(-1,2)\n dst_1 = np.asarray(dst_1).reshape(-1,2)\n H1,score = cv2.estimateAffinePartial2D(src, dst, ransacReprojThreshold=0.50)\n \n theta = np.arctan2(H1[0,1], H1[0,0])\n scale = H1[0,0] / np.cos(theta)\n tx = H1[0,2]\n ty = H1[1,2]\n score1 = score.copy()\n \n new_H = np.eye(3);\n new_H[0,0] = np.cos(theta)\n new_H[0,1] = np.sin(theta)\n new_H[0,2] = tx\n new_H[1,0] = -np.sin(theta)\n new_H[1,1] = np.cos(theta)\n new_H[1,2] = ty\n \n return new_H, score1, scale, tx, ty, theta", "def track(self, image):\n\n keyframe_image = self.images[self.keyframe_inds[-1]]\n images = np.stack([keyframe_image, image], axis=0)\n\n keyframe_pose = self.poses[self.keyframe_inds[-1]]\n poses = np.stack([keyframe_pose, self.pose_cur], axis=0)\n\n keyframe_depth = self.depths[self.keyframe_inds[-1]]\n depths = keyframe_depth[np.newaxis]\n\n edges = np.array([[0,1]], dtype=np.int32)\n fixed = np.int32(0)\n\n feed_dict = {\n self.images_placeholder: images,\n self.depths_placeholder: depths,\n self.poses_placeholder: poses,\n self.edges_placeholder: edges,\n self.fixed_placeholder: fixed,\n self.init_placeholder: False,\n self.intrinsics_placeholder: self.intrinsics}\n\n updated_poses = self.sess.run(self.outputs['poses'], feed_dict=feed_dict)\n\n # relative pose between keyframe and new pose\n dP = np.matmul(updated_poses[1], np.linalg.inv(updated_poses[0])) \n self.pose_cur = np.matmul(dP, keyframe_pose)\n\n return pose_distance(dP)", "def parse_trajectory(kpt3d, m_camera):\n\n def _dist(a, b):\n assert a.shape == (14, 4)\n assert b.shape == (14, 4)\n valid_kpt = np.logical_and(a[:, -1] > 0.9, b[:, -1] > 0.9)\n if np.sum(valid_kpt) == 0:\n return 1e10\n d = np.sqrt(np.square(a[:, :-1] - b[:, :-1]).sum(axis=1))\n return np.sum(d * valid_kpt) / valid_kpt.sum()\n\n nr_traj = len(kpt3d[0]) # track proposals appear in first frame.\n history = [[] for _ in range(nr_traj)]\n\n # tracking\n for i_prop in range(nr_traj):\n history[i_prop].append(i_prop)\n for t in range(1, len(kpt3d), 1):\n print(\"t: {}\".format(t))\n for idx_prop in range(nr_traj):\n prev_kpt = kpt3d[t - 1][history[idx_prop][t - 1]]\n candidates = kpt3d[t]\n d = [_dist(prev_kpt, k) for k in kpt3d[t]]\n print(\"idx_prop = {}, d = {}\".format(idx_prop, d))\n history[idx_prop].append(np.argmin(d))\n\n # check: visualize trajectory\n def _check_vis():\n save_root = \"./vis_traj_3d\"\n if not os.path.exists(save_root):\n os.makedirs(save_root)\n for idx_traj in range(nr_traj):\n print(\"idx_traj: {}\".format(idx_traj))\n kpt_traj = [kpt3d[t][i][:, :-1] for t, i in enumerate(history[idx_traj])]\n traj_path = os.path.join(save_root, \"troj_{:02d}\".format(idx_traj))\n if not os.path.exists(traj_path):\n os.mkdir(traj_path)\n for i, k in enumerate(tqdm(kpt_traj)):\n filename = os.path.join(traj_path, \"{:03d}.png\".format(i))\n draw_kpt_3d(k.reshape(1, *k.shape), filename)\n\n # save trajectory and camera parameter\n toy_data_root = \"data_toy\"\n if not os.path.exists(toy_data_root):\n os.mkdir(toy_data_root)\n traj = []\n for idx_traj in range(nr_traj):\n traj.append([kpt3d[t][i][:, :-1] for t, i in enumerate(history[idx_traj])])\n np.save(os.path.join(toy_data_root, \"trajs.npy\"), np.asarray(traj))\n np.save(os.path.join(toy_data_root, \"m_camera.npy\"), np.asarray(m_camera))", "def track_motion(self, video=None, set_roi=False):\r\n \r\n if video is None:\r\n video = self.video_buffer\r\n if set_roi:\r\n roi = self.get_roi(video=video)\r\n \r\n video_track = video.copy()\r\n motion_tracker = []\r\n # Generate different 
colors for tracking display \r\n color = np.random.randint(0,255,(100,3))\r\n \r\n # params for ShiTomasi corner detection\r\n feature_params = dict( maxCorners = 100,\r\n qualityLevel = 0.3,\r\n minDistance = 5,\r\n blockSize = 7 )\r\n # Parameters for lucas kanade optical flow\r\n lk_params = dict( winSize = (15,15),\r\n maxLevel = 8,\r\n criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\r\n\r\n old_gray = cv2.cvtColor(video[0], cv2.COLOR_BGR2GRAY)\r\n # Create mask for drawing\r\n mask = np.zeros_like(video[0])\r\n # Mask to dectate the features to track\r\n features_mask = np.zeros_like(old_gray)\r\n features_mask[roi['x1']: roi['x2'], roi['y1']: roi['y2']] = old_gray[roi['x1']: roi['x2'], roi['y1']: roi['y2']]\r\n # Find corners in first frame\r\n p0 = cv2.goodFeaturesToTrack(features_mask, mask = None, **feature_params)\r\n \r\n for idx in range(1, video.shape[0]):\r\n new_gray = cv2.cvtColor(video[idx], cv2.COLOR_BGR2GRAY)\r\n # calculate optical flow\r\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, new_gray, p0, None, **lk_params)\r\n \r\n # Get good points\r\n good_old = p0[st==1]\r\n good_new = p1[st==1]\r\n motion_tracker.append(good_new)\r\n for i, (old, new) in enumerate(zip(good_old, good_new)):\r\n (ox, oy) = old.reval()\r\n (nx, ny) = new.ravel()\r\n mask = cv2.circle(mask, (nx, ny), 5, color[i].tolist(), -1)\r\n frame = cv2.add(video[idx], mask)\r\n video_track[idx] = frame\r\n \r\n cv2.imshow('frame', frame)\r\n if cv2.waitKey(30) & 0xFF==27:\r\n break\r\n # Updat old frames and points before checking next frame\r\n \r\n old_gray = new_gray.copy()\r\n p0 = p1.resapr(-1,1,2)\r\n cv2.destroyAllWindows()\r\n \r\n return video_track, motion_tracker", "def forward(self, motion_feats: torch.Tensor, img: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n # Encode map/scene\n img_feats = self.feats(img)\n img_feats = self.relu(self.conv_grid(img_feats))\n\n # Concatenate motion and position features\n feats = torch.cat((img_feats, motion_feats), dim=1)\n\n # Output goal and path rewards\n r_goal = self.logsigmoid(self.op_goal(self.relu(self.agg2_goal(self.relu(self.agg1_goal(feats))))))\n r_path = self.logsigmoid(self.op_path(self.relu(self.agg2_path(self.relu(self.agg1_path(feats))))))\n\n # Concatenate along channel dimension\n r = torch.cat((r_path, r_goal), 1)\n\n return r, img_feats", "def track_points(self, image):\r\n\r\n\t\tif self.features != []:\r\n\t\t\tself.step(image) # move to the next frame\r\n\t\t\t\r\n\t\t\t# reshape to fit input format\r\n\t\t\ttmp = np.float32(self.features).reshape(-1, 1, 2)\r\n\t\t\t\r\n\t\t\t# calculate optical flow\r\n\t\t\tfeatures,status,track_error = cv2.calcOpticalFlowPyrLK(self.prev_gray,self.gray,tmp,None,**lk_params)\r\n\r\n\t\t\t# remove points lost\r\n\t\t\tself.features = [p for (st,p) in zip(status,features) if st]\r\n\t\t\t\r\n\t\t\t# clean tracks from lost points\r\n\t\t\tfeatures = np.array(features).reshape((-1,2))\r\n\t\t\tfor i,f in enumerate(features):\r\n\t\t\t\tself.tracks[i].append(f)\r\n\t\t\tndx = [i for (i,st) in enumerate(status) if not st]\r\n\t\t\tndx.reverse() #remove from back\r\n\t\t\tfor i in ndx:\r\n\t\t\t\tself.tracks.pop(i)\r\n\t\t\t\r\n\t\t\tself.prev_gray = self.gray", "def load_trajectories(*paths, use_vision=True, vision_interval=10,\n use_proprioception=True, use_haptics=True,\n use_mass=False, use_depth=False,\n image_blackout_ratio=0,\n sequential_image_rate=1,\n start_timestep=0,\n direction_filter=None, \n **unused):\n trajectories = []\n\n assert 1 > image_blackout_ratio 
>= 0\n assert image_blackout_ratio == 0 or sequential_image_rate == 1\n\n for path in paths:\n count = np.float('inf')\n if type(path) == tuple:\n path, count = path\n assert type(count) == int\n\n with utils.TrajectoriesFile(path) as f:\n # Iterate over each trajectory\n for i, trajectory in enumerate(f):\n if i >= count:\n break\n\n timesteps = len(trajectory['Cylinder0_pos'])\n\n # Define our state: we expect this to be:\n # (x, y, cos theta, sin theta, mass, friction)\n # TODO: add mass, friction\n state_dim = 2\n states = np.full((timesteps, state_dim), np.nan)\n\n states[:, :2] = trajectory['Cylinder0_pos'][:, :2] # x, y\n if use_mass:\n states[:, 3] = trajectory['Cylinder0_mass'][:, 0]\n \n\n\n # states[:, 2] = np.cos(trajectory['object_z_angle'])\n # states[:, 3] = np.sin(trajectory['object_z_angle'])\n # states[:, 5] = trajectory['Cylinder0_friction'][:, 0]\n\n # Pull out observations\n ## This is currently consisted of:\n ## > gripper_pos: end effector position\n ## > gripper_sensors: F/T, contact sensors\n ## > image: camera image\n\n observations = {}\n observations['gripper_pos'] = trajectory['eef_pos']\n assert observations['gripper_pos'].shape == (timesteps, 3)\n\n observations['gripper_sensors'] = np.concatenate((\n trajectory['force'],\n trajectory['contact'][:, np.newaxis],\n ), axis=1)\n assert observations['gripper_sensors'].shape[1] == 7\n\n if not use_proprioception:\n observations['gripper_pos'][:] = 0\n if not use_haptics:\n observations['gripper_sensors'][:] = 0\n\n if 'raw_image' in trajectory:\n observations['raw_image'] = trajectory['raw_image']\n observations['image'] = np.zeros_like(trajectory['image'])\n if use_vision:\n for i in range(len(observations['image'])):\n index = (i // vision_interval) * vision_interval\n index = min(index, len(observations['image']))\n blackout_chance = np.random.uniform()\n # if blackout chance > ratio, then fill image\n # otherwise zero\n if image_blackout_ratio == 0 and i % sequential_image_rate == 0:\n observations['image'][i] = trajectory['image'][index]\n\n if blackout_chance > image_blackout_ratio and sequential_image_rate == 1:\n observations['image'][i] = trajectory['image'][index]\n\n observations['depth'] = np.zeros_like(trajectory['depth'])\n if use_depth:\n for i in range(len(observations['depth'])):\n index = (i // vision_interval) * vision_interval\n index = min(index, len(observations['depth']))\n observations['depth'][i] = trajectory['depth'][index]\n\n # Pull out controls\n ## This is currently consisted of:\n ## > previous end effector position\n ## > end effector position delta\n ## > binary contact reading\n eef_positions = trajectory['eef_pos']\n eef_positions_shifted = np.roll(eef_positions, shift=1, axis=0)\n eef_positions_shifted[0] = eef_positions[0]\n controls = np.concatenate([\n eef_positions_shifted,\n eef_positions - eef_positions_shifted,\n trajectory['contact'][:, np.newaxis],\n ], axis=1)\n assert controls.shape == (timesteps, 7)\n\n # Normalization\n\n observations['gripper_pos'] -= np.array(\n [[0.46806443, -0.0017836, 0.88028437]], dtype=np.float32)\n observations['gripper_pos'] /= np.array(\n [[0.02410769, 0.02341035, 0.04018243]], dtype=np.float32)\n observations['gripper_sensors'] -= np.array(\n [[4.9182904e-01, 4.5039989e-02, -3.2791464e+00,\n -3.3874984e-03, 1.1552566e-02, -8.4817986e-04,\n 2.1303751e-01]], dtype=np.float32)\n observations['gripper_sensors'] /= np.array(\n [[1.6152629, 1.666905, 1.9186896, 0.14219016, 0.14232528,\n 0.01675198, 0.40950698]], dtype=np.float32)\n states -= 
np.array([[0.4970164, -0.00916641]])\n states /= np.array([[0.0572766, 0.06118315]])\n controls -= np.array([[4.6594709e-01, -2.5247163e-03, 8.8094306e-01, 1.2939950e-04,\n -5.4364675e-05, -6.1112235e-04, 2.2041667e-01]], dtype=np.float32)\n controls /= np.array([[0.02239027, 0.02356066, 0.0405312, 0.00054858, 0.0005754,\n 0.00046352, 0.41451886]], dtype=np.float32)\n\n x_delta = states[start_timestep, 0] - states[-1, 0]\n y_delta = states[start_timestep, 1]-states[-1, 1]\n if direction_filter == 'x':\n if not (abs(x_delta) > 0.55 and abs(y_delta) < 0.2):\n \n continue \n if direction_filter == 'y':\n if not (abs(x_delta) < 0.20 and abs(y_delta) > 0.55):\n continue \n \n trajectories.append((\n states[start_timestep:],\n utils.DictIterator(observations)[start_timestep:],\n controls[start_timestep:]\n ))\n\n ## Uncomment this line to generate the lines required to normalize data\n # _print_normalization(trajectories)\n\n return trajectories", "def motion_extraction():\n # iterate through frames\n global frame_height, frame_width\n global limb_coords, init_coords\n frame_count = 0\n has_frames, frame = capture.read()\n\n while has_frames:\n img_out = frame.copy()\n img_out = insert_padding(img_out, 14*14, 12*14)\n\n if frame_count == 0:\n # change global values of height and width\n frame_height = frame_height + 14*14*2\n frame_width = frame_width + 12*14*2\n get_start_positions(img_out)\n img_out2 = segment_red(img_out, 200, 130)\n #erode(img_out2, 4, 6)\n remove_artifacts(img_out2)\n #enhance_contrast(img_out2)\n\n if frame_count > 0:\n get_motion(prev_frame, img_out2, frame_count)\n\n prev_frame = img_out2.copy()\n frame_count += 1\n has_frames, frame = capture.read()", "def get_computed_camera_poses(self):\n pairs = sorted((timestamp, pose) for timestamp, pose in self.frame_deltas.items())\n current_pose = tf.Transform()\n computed_poses = {}\n for timestamp, delta in pairs:\n delta = delta.find_relative(tf.Transform()) # Flip the direction from previous pose relative to new\n current_pose = current_pose.find_independent(delta)\n computed_poses[timestamp] = current_pose\n return computed_poses", "def process(self):\r\n\r\n\t\ttot_scenes = len(self.scenes)\r\n\r\n\t\t# motion intensity is how much the objects inside the frame move\r\n\t\t# this holds how much intensity each scene has\r\n\t\tself.scene_motion_intensity = [0]*tot_scenes\r\n\r\n\t\t# camera motion occurs when the recording camera itself moves\r\n\t\t# this holds the number of frames in each scene that have\r\n\t\t# camera motion\r\n\t\tself.scene_camera_motion = [0]*tot_scenes\r\n\r\n\t\t# for multithreading\r\n\t\ttmp_file1 = cv2.VideoCapture(self.file_path)\r\n\t\ttmp_file2 = cv2.VideoCapture(self.file_path)\r\n\r\n\t\tf1 = lambda : self.estimate(0, tot_scenes//3, self.file)\r\n\t\tf2 = lambda : self.estimate(tot_scenes//3, 2*tot_scenes//3, tmp_file1)\r\n\t\tf3 = lambda : self.estimate(2*tot_scenes//3, tot_scenes, tmp_file2)\r\n\r\n\t\tt1 = HelperThread(\"Optical Flow 1\", f1)\r\n\t\tt2 = HelperThread(\"Optical Flow 2\", f2)\r\n\t\tt3 = HelperThread(\"Optical Flow 3\", f3)\r\n\r\n\t\tt1.start(), t2.start(), t3.start()\r\n\t\tt1.join(), t2.join(), t3.join()\r\n\r\n\t\ttmp_file2.release()\r\n\t\ttmp_file1.release()", "def compute_trajectory():\n pass", "def triangulate_points_and_reproject(R_l, t_l, R_r, t_r, K, points3d_with_views, img_idx1, img_idx2, kpts_i, kpts_j, kpts_i_idxs, kpts_j_idxs, reproject=True):\n\n print(f\"Triangulating: {len(kpts_i)} points.\")\n P_l = np.dot(K, np.hstack((R_l, t_l)))\n P_r = np.dot(K, 
np.hstack((R_r, t_r)))\n\n kpts_i = np.squeeze(kpts_i)\n kpts_i = kpts_i.transpose()\n kpts_i = kpts_i.reshape(2,-1)\n kpts_j = np.squeeze(kpts_j)\n kpts_j = kpts_j.transpose()\n kpts_j = kpts_j.reshape(2,-1)\n\n point_4d_hom = cv2.triangulatePoints(P_l, P_r, kpts_i, kpts_j)\n points_3D = cv2.convertPointsFromHomogeneous(point_4d_hom.transpose())\n for i in range(kpts_i.shape[1]):\n source_2dpt_idxs = {img_idx1:kpts_i_idxs[i], img_idx2:kpts_j_idxs[i]}\n pt = Point3D_with_views(points_3D[i], source_2dpt_idxs)\n points3d_with_views.append(pt)\n\n if reproject:\n kpts_i = kpts_i.transpose()\n kpts_j = kpts_j.transpose()\n rvec_l, _ = cv2.Rodrigues(R_l)\n rvec_r, _ = cv2.Rodrigues(R_r)\n projPoints_l, _ = cv2.projectPoints(points_3D, rvec_l, t_l, K, distCoeffs=np.array([]))\n projPoints_r, _ = cv2.projectPoints(points_3D, rvec_r, t_r, K, distCoeffs=np.array([]))\n delta_l , delta_r = [], []\n for i in range(len(projPoints_l)):\n delta_l.append(abs(projPoints_l[i][0][0] - kpts_i[i][0]))\n delta_l.append(abs(projPoints_l[i][0][1] - kpts_i[i][1]))\n delta_r.append(abs(projPoints_r[i][0][0] - kpts_j[i][0]))\n delta_r.append(abs(projPoints_r[i][0][1] - kpts_j[i][1]))\n avg_error_l = sum(delta_l)/len(delta_l)\n avg_error_r = sum(delta_r)/len(delta_r)\n print(f\"Average reprojection error for just-triangulated points on image {img_idx1} is:\", avg_error_l, \"pixels.\")\n print(f\"Average reprojection error for just-triangulated points on image {img_idx2} is:\", avg_error_r, \"pixels.\")\n errors = list(zip(delta_l, delta_r))\n return points3d_with_views, errors, avg_error_l, avg_error_r\n\n return points3d_with_views", "def move_to_point_and_extract(coords_from_to: list,\n gps: adapters.GPSUbloxAdapter,\n vesc_engine: adapters.VescAdapterV4,\n smoothie: adapters.SmoothieAdapter,\n camera: adapters.CameraAdapterIMX219_170,\n periphery_det: detection.YoloOpenCVDetection,\n precise_det: detection.YoloOpenCVDetection,\n logger_full: utility.Logger,\n report_field_names,\n trajectory_saver: utility.TrajectorySaver,\n working_zone_polygon,\n img_output_dir,\n nav: navigation.GPSComputing,\n data_collector: datacollection.DataCollector,\n log_cur_dir,\n image_saver: utility.ImageSaver,\n notification: NotificationClient,\n extraction_manager_v3: ExtractionManagerV3,\n ui_msg_queue: posix_ipc.MessageQueue,\n SI_speed: float,\n wheels_straight: bool,\n navigation_prediction: navigation.NavigationPrediction,\n future_points: list,\n allow_extractions: bool,\n x_scan_poly: list,\n cur_field):\n\n if config.ALLOW_FIELD_LEAVING_PROTECTION and cur_field is not None and len(cur_field) > 2:\n enable_field_leaving_protection = True\n else:\n enable_field_leaving_protection = False\n if config.ALLOW_FIELD_LEAVING_PROTECTION:\n if cur_field is None:\n msg = f\"WARNING: robot field leaving protection WILL NOT WORK as given field is None\"\n print(msg)\n logger_full.write(msg)\n elif len(cur_field) < 3:\n msg = f\"WARNING: robot field leaving protection WILL NOT WORK as given field contains \" \\\n f\"{len(cur_field)} points (required ar least 3 points)\"\n print(msg)\n logger_full.write(msg)\n\n extract = SI_speed > 0 and allow_extractions\n\n vesc_speed = SI_speed * config.MULTIPLIER_SI_SPEED_TO_RPM\n speed_fast = config.SI_SPEED_FAST * config.MULTIPLIER_SI_SPEED_TO_RPM\n vesc_speed_fast = speed_fast if SI_speed >= 0 else -speed_fast\n navigation_prediction.set_SI_speed(SI_speed)\n\n raw_angles_history = []\n detections_period = []\n navigations_period = []\n stop_helping_point = nav.get_coordinate(\n 
coords_from_to[1], coords_from_to[0], 90, 1000)\n learn_go_straight_index = 0\n learn_go_straight_history = []\n\n last_skipped_point = coords_from_to[0]\n start_Nav_while = True\n last_correct_raw_angle = 0\n point_status = \"origin\"\n last_corridor_side = 0\n current_corridor_side = 1\n almost_start = 0\n\n prev_maneuver_time = time.time()\n working_mode_slow = 1\n working_mode_fast = 2\n working_mode_switching = 3\n current_working_mode = working_mode_slow\n last_working_mode = 0\n # True if robot is close to one of current movement vector points, False otherwise; False if speed limit near points is disabled\n close_to_end = config.USE_SPEED_LIMIT\n bumper_is_pressed = None\n\n # message queue sending temporary performance tracker\n if config.QUEUE_TRACK_PERFORMANCE:\n ui_msg_queue_perf = {\n \"max_time\": 0,\n \"min_time\": float(\"inf\"),\n \"total_time\": 0,\n \"total_sends\": 0,\n \"timeouts_exceeded\": 0\n }\n\n # x movements during periphery scans\n x_scan_cur_idx = 0\n x_scan_idx_increasing = True\n\n # set camera to the Y min\n res = smoothie.custom_separate_xy_move_to(X_F=config.X_F_MAX,\n Y_F=config.Y_F_MAX,\n X=smoothie.smoothie_to_mm(\n (config.X_MAX - config.X_MIN) / 2, \"X\"),\n Y=smoothie.smoothie_to_mm(config.Y_MIN, \"Y\"))\n if res != smoothie.RESPONSE_OK:\n msg = \"INIT: Failed to move camera to Y min, smoothie response:\\n\" + res\n logger_full.write(msg + \"\\n\")\n smoothie.wait_for_all_actions_done()\n\n # TODO: maybe should add sleep time as camera currently has delay\n\n if config.AUDIT_MODE:\n vesc_engine.set_target_rpm(vesc_speed, vesc_engine.PROPULSION_KEY)\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n\n try:\n notificationQueue = posix_ipc.MessageQueue(\n config.QUEUE_NAME_UI_NOTIFICATION)\n except KeyboardInterrupt:\n raise KeyboardInterrupt\n except:\n notificationQueue = None\n\n degraded_navigation_mode = False\n\n number_navigation_cycle_without_gps = 0\n\n point_reading_t = last_send_gps_time = slow_mode_time = time.time()\n\n have_time_for_inference = True\n predictor_next_gps_expected_ts = float(\"inf\")\n\n # main navigation control loop\n while True:\n # gps point reading time predictor\n if have_time_for_inference and config.ALLOW_GPS_TIME_PREDICTIONS_LIMITING_INFERENCE:\n if time.time() + config.INFERENCE_MAX_TICK_TIME > predictor_next_gps_expected_ts:\n have_time_for_inference = False\n\n if have_time_for_inference:\n # EXTRACTION CONTROL\n start_t = time.time()\n frame = camera.get_image()\n frame_t = time.time()\n\n per_det_start_t = time.time()\n if extract:\n plants_boxes = periphery_det.detect(frame)\n else:\n plants_boxes = list()\n per_det_end_t = time.time()\n detections_period.append(per_det_end_t - start_t)\n\n if config.SAVE_DEBUG_IMAGES:\n image_saver.save_image(\n frame,\n img_output_dir,\n label=\"PE_view_M=\" + str(current_working_mode),\n plants_boxes=plants_boxes)\n if config.ALLOW_GATHERING and current_working_mode == working_mode_slow and \\\n image_saver.get_counter(\"gathering\") < config.DATA_GATHERING_MAX_IMAGES:\n image_saver.save_image(frame, config.DATA_GATHERING_DIR,\n plants_boxes=plants_boxes, counter_key=\"gathering\")\n\n if extract:\n msg = \"View frame time: \" + str(frame_t - start_t) + \"\\t\\tPeri. det. time: \" + \\\n str(per_det_end_t - per_det_start_t)\n else:\n msg = \"View frame time: \" + str(frame_t - start_t) + \"\\t\\tPeri. det. 
(extractions are off) time: \" + \\\n str(per_det_end_t - per_det_start_t)\n logger_full.write(msg + \"\\n\")\n\n # MOVEMENT AND ACTIONS MODES\n if config.AUDIT_MODE:\n dc_start_t = time.time()\n\n # count detected plant boxes for each type\n plants_count = dict()\n for plant_box in plants_boxes:\n plant_box_name = plant_box.get_name()\n if plant_box_name in plants_count:\n plants_count[plant_box_name] += 1\n else:\n plants_count[plant_box_name] = 1\n\n # save info into data collector\n for plant_label in plants_count:\n data_collector.add_detections_data(plant_label,\n math.ceil((plants_count[plant_label]) / config.AUDIT_DIVIDER))\n\n # flush updates into the audit output file and log measured time\n if len(plants_boxes) > 0:\n data_collector.save_all_data(\n log_cur_dir + config.AUDIT_OUTPUT_FILE)\n\n dc_t = time.time() - dc_start_t\n msg = \"Last scan weeds detected: \" + str(len(plants_boxes)) + \\\n \", audit processing tick time: \" + str(dc_t)\n logger_full.write(msg + \"\\n\")\n else:\n # slow mode\n if current_working_mode == working_mode_slow:\n if last_working_mode != current_working_mode:\n last_working_mode = current_working_mode\n msg = \"[Working mode] : slow\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n\n if ExtractionManagerV3.any_plant_in_zone(\n plants_boxes,\n x_scan_poly[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else working_zone_polygon):\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n if config.VERBOSE_EXTRACT:\n msg = \"[VERBOSE EXTRACT] Stopping the robot because we have detected plant(s).\"\n logger_full.write_and_flush(msg+\"\\n\")\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n # TODO this 0 rpm \"movement\" is to prevent robot movement during extractions, need to add this in future to rest speed modes too\n vesc_engine.set_time_to_move(config.VESC_MOVING_TIME, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_target_rpm(0, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_current_rpm(0, vesc_engine.PROPULSION_KEY)\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n\n # TODO remove thread init from here!\n voltage_thread = threading.Thread(\n target=send_voltage_thread_tf,\n args=(vesc_engine, ui_msg_queue),\n daemon=True)\n voltage_thread.start()\n\n # single precise center scan before calling for PDZ scanning and extractions\n if config.ALLOW_PRECISE_SINGLE_SCAN_BEFORE_PDZ and not config.ALLOW_X_MOVEMENT_DURING_SCANS:\n time.sleep(config.DELAY_BEFORE_2ND_SCAN)\n frame = camera.get_image()\n plants_boxes = precise_det.detect(frame)\n\n # do PDZ scan and extract all plants if single precise scan got plants in working area\n if ExtractionManagerV3.any_plant_in_zone(plants_boxes, working_zone_polygon):\n if config.EXTRACTION_MODE == 1:\n extraction_manager_v3.extract_all_plants()\n elif config.EXTRACTION_MODE == 2:\n extraction_manager_v3.mill_all_plants()\n slow_mode_time = time.time()\n else:\n if config.EXTRACTION_MODE == 1:\n extraction_manager_v3.extract_all_plants()\n elif config.EXTRACTION_MODE == 2:\n extraction_manager_v3.mill_all_plants()\n slow_mode_time = time.time()\n\n if config.VERBOSE_EXTRACT:\n msg = \"[VERBOSE EXTRACT] Extract cycle are finish.\"\n logger_full.write_and_flush(msg+\"\\n\")\n\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n\n msg = \"Applying force step forward after extractions cycle(s)\"\n logger_full.write(msg + \"\\n\")\n if config.VERBOSE:\n print(msg)\n 
vesc_engine.set_time_to_move(config.STEP_FORWARD_TIME, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_target_rpm(\n config.SI_SPEED_STEP_FORWARD * config.MULTIPLIER_SI_SPEED_TO_RPM,\n vesc_engine.PROPULSION_KEY)\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n vesc_engine.wait_for_stop(vesc_engine.PROPULSION_KEY)\n\n elif config.SLOW_FAST_MODE and time.time() - slow_mode_time > config.SLOW_MODE_MIN_TIME:\n # move cork to fast mode scan position\n if config.VERBOSE:\n msg = \"SLOW MODE: moving cork to fast mode position\\n\"\n logger_full.write(msg)\n\n res = smoothie.custom_separate_xy_move_to(\n X_F=config.X_F_MAX,\n Y_F=config.Y_F_MAX,\n X=smoothie.smoothie_to_mm(\n (config.X_MAX - config.X_MIN) / 2, \"X\"),\n Y=smoothie.smoothie_to_mm((config.Y_MAX - config.Y_MIN) * config.SLOW_FAST_MODE_HEAD_FACTOR,\n \"Y\"))\n if res != smoothie.RESPONSE_OK:\n msg = \"INIT: Keeping in slow mode as failed to move camera to fast mode scan position, smoothie's response:\\n\" + res\n logger_full.write(msg + \"\\n\")\n else:\n msg = \"Switching from 'slow mode' to 'switching mode'\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n current_working_mode = working_mode_switching\n\n # TODO a bug: will not start moving if config.SLOW_MODE_MIN_TIME == 0 or too low (switch speed applies right after slow mode weeds extractions)\n if not vesc_engine.is_moving(vesc_engine.PROPULSION_KEY):\n vesc_engine.set_time_to_move(config.VESC_MOVING_TIME, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_target_rpm(vesc_speed, vesc_engine.PROPULSION_KEY)\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n\n # switching (from slow to fast) mode\n elif current_working_mode == working_mode_switching:\n if last_working_mode != current_working_mode:\n last_working_mode = current_working_mode\n msg = \"[Working mode] : switching to fast\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n\n if ExtractionManagerV3.any_plant_in_zone(\n plants_boxes,\n x_scan_poly[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else working_zone_polygon):\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n if config.VERBOSE:\n msg = \"Moving cork to slow mode scan position\\n\"\n logger_full.write(msg)\n\n # smoothie.wait_for_all_actions_done()\n res = smoothie.custom_separate_xy_move_to(\n X_F=config.X_F_MAX,\n Y_F=config.Y_F_MAX,\n X=smoothie.smoothie_to_mm(\n (config.X_MAX - config.X_MIN) / 2, \"X\"),\n Y=smoothie.smoothie_to_mm(config.Y_MIN, \"Y\"))\n if res != smoothie.RESPONSE_OK:\n msg = \"INIT: Failed to move camera to Y min, smoothie response:\\n\" + res\n logger_full.write(msg + \"\\n\")\n smoothie.wait_for_all_actions_done()\n\n current_working_mode = working_mode_slow\n slow_mode_time = time.time()\n vesc_engine.set_target_rpm(\n vesc_speed, vesc_engine.PROPULSION_KEY)\n continue\n\n sm_cur_pos = smoothie.get_smoothie_current_coordinates(\n convert_to_mms=False)\n if abs(sm_cur_pos[\"X\"] - (config.X_MAX - config.X_MIN) / 2) < 0.001 and \\\n abs(sm_cur_pos[\"Y\"] - (config.Y_MAX - config.Y_MIN) * config.SLOW_FAST_MODE_HEAD_FACTOR) < 0.001:\n msg = \"Switching from 'switching mode' to 'fast mode'\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n current_working_mode = working_mode_fast\n\n # fast mode\n elif current_working_mode == 
working_mode_fast:\n if last_working_mode != current_working_mode:\n last_working_mode = current_working_mode\n msg = \"[Working mode] : fast\"\n if config.LOG_SPEED_MODES:\n logger_full.write_and_flush(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n\n if ExtractionManagerV3.any_plant_in_zone(\n plants_boxes,\n x_scan_poly[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else working_zone_polygon):\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n if config.VERBOSE:\n msg = \"Moving cork to slow mode scan position\\n\"\n logger_full.write(msg)\n\n # smoothie.wait_for_all_actions_done()\n res = smoothie.custom_separate_xy_move_to(\n X_F=config.X_F_MAX,\n Y_F=config.Y_F_MAX,\n X=smoothie.smoothie_to_mm(\n (config.X_MAX - config.X_MIN) / 2, \"X\"),\n Y=smoothie.smoothie_to_mm(config.Y_MIN, \"Y\"))\n if res != smoothie.RESPONSE_OK:\n msg = \"INIT: Failed to move camera to Y min, smoothie response:\\n\" + res\n logger_full.write(msg + \"\\n\")\n smoothie.wait_for_all_actions_done()\n\n msg = \"Switching from 'fast mode' to 'slow mode'\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n current_working_mode = working_mode_slow\n slow_mode_time = time.time()\n # TODO dont need anymore? as rpm is set at the end of slow mode\n # vesc_engine.set_rpm(vesc_speed, vesc_engine.PROPULSION_KEY)\n continue\n elif close_to_end:\n cur_vesc_rpm = vesc_engine.get_current_rpm(\n vesc_engine.PROPULSION_KEY)\n if cur_vesc_rpm != vesc_speed:\n msg = f\"Applying slow speed {vesc_speed} at 'fast mode' \" \\\n f\"(was {cur_vesc_rpm}) \" \\\n f\"because of close_to_end flag trigger\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n vesc_engine.set_target_rpm(\n vesc_speed, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_current_rpm(\n vesc_speed, vesc_engine.PROPULSION_KEY)\n else:\n cur_vesc_rpm = vesc_engine.get_current_rpm(\n vesc_engine.PROPULSION_KEY)\n if cur_vesc_rpm != vesc_speed_fast:\n msg = f\"Applying fast speed {vesc_speed_fast} at 'fast mode' (was {cur_vesc_rpm})\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n vesc_engine.set_target_rpm(\n vesc_speed_fast, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_current_rpm(\n vesc_speed_fast, vesc_engine.PROPULSION_KEY)\n\n # NAVIGATION CONTROL\n cur_pos_obj = gps.get_last_position_v2()\n cur_pos = cur_pos_obj.as_old_list\n\n nav_start_t = time.time()\n\n if start_Nav_while:\n navigation_period = 1\n else:\n navigation_period = nav_start_t - prev_maneuver_time\n\n navigations_period.append(navigation_period)\n # time reference to decide the number of detection before resuming gps.get\n prev_maneuver_time = nav_start_t\n # print(\"tock\")\n\n if start_Nav_while:\n prev_pos_obj = cur_pos_obj\n prev_pos = prev_pos_obj.as_old_list\n start_Nav_while = False\n\n # mu_navigations_period, sigma_navigations_period = utility.mu_sigma(navigations_period)\n\n navigation_prediction.set_current_lat_long(cur_pos)\n\n # skip same points (non-blocking reading returns old point if new point isn't available yet)\n if math.isclose(cur_pos_obj.creation_ts, prev_pos_obj.creation_ts):\n # stop robot if there's no new points for a while\n if time.time() - point_reading_t > config.GPS_POINT_TIME_BEFORE_STOP:\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n msg = 
f\"Stopping the robot due to exceeding time 'GPS_POINT_TIME_BEFORE_STOP=\" \\\n f\"{config.GPS_POINT_TIME_BEFORE_STOP}' limit without new gps points from adapter\"\n logger_full.write_and_flush(msg + \"\\n\")\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n gps_reconnect_ts = time.time()\n\n while True:\n cur_pos_obj = gps.get_last_position_v2()\n cur_pos = cur_pos_obj.as_old_list\n\n if math.isclose(cur_pos_obj.creation_ts, prev_pos_obj.creation_ts):\n # reconnect gps adapter to ublox if there's no gps points for a while\n if time.time() - gps_reconnect_ts > config.GPS_POINT_TIME_BEFORE_RECONNECT:\n gps.reconnect()\n gps_reconnect_ts = time.time()\n msg = \"Called GPS adapter to reconnect to ublox due to waiting too much for a new GPS \" \\\n \"point (new points filter)\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n else:\n msg = \"New GPS point received, continuing movement\"\n logger_full.write_and_flush(msg + \"\\n\")\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n break\n else:\n continue\n\n # gps points reading time predictor\n predictor_next_gps_expected_ts = cur_pos_obj.receiving_ts + config.GPS_POINT_WAIT_TIME_MAX\n have_time_for_inference = True\n\n # points filter by quality flag\n if cur_pos[2] != \"4\" and config.ALLOW_GPS_BAD_QUALITY_NTRIP_RESTART:\n # restart ntrip if enough time passed since the last ntrip restart\n navigation.NavigationV3.restart_ntrip_service(logger_full)\n\n # stop robot due to bad point quality if allowed\n if config.ALLOW_GPS_BAD_QUALITY_STOP:\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n logger_full.write_and_flush(\n \"Stopping the robot for lack of quality gps 4, waiting for it...\\n\")\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n prev_bad_quality_pos_obj = cur_pos_obj\n gps_reconnect_ts = time.time()\n\n while True:\n cur_pos_obj = gps.get_last_position_v2()\n cur_pos = cur_pos_obj.as_old_list\n\n # check if it's a new point\n if math.isclose(cur_pos_obj.creation_ts, prev_bad_quality_pos_obj.creation_ts):\n # reconnect gps adapter to ublox if there's no gps points for a while\n if time.time() - gps_reconnect_ts > config.GPS_POINT_TIME_BEFORE_RECONNECT:\n gps.reconnect()\n gps_reconnect_ts = time.time()\n msg = \"Called GPS adapter to reconnect to ublox due to waiting too much for a new \" \\\n \"GPS point (quality filter)\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n continue\n else:\n prev_bad_quality_pos_obj = cur_pos_obj\n\n # check if it's a good quality point\n if cur_pos[2] != \"4\":\n # restart ntrip if enough time passed since the last ntrip restart\n navigation.NavigationV3.restart_ntrip_service(\n logger_full)\n else:\n msg = \"The gps has regained quality 4, starting movement\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n break\n\n # points filter by distance\n prev_cur_distance = nav.get_distance(prev_pos, cur_pos)\n if config.ALLOW_GPS_PREV_CUR_DIST_STOP and prev_cur_distance > config.PREV_CUR_POINT_MAX_DIST:\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n msg = f\"Stopping the robot due to GPS points filter by distance (assuming current position point \" \\\n f\"{str(cur_pos)} is wrong as distance between current position and prev. 
position {str(prev_pos)}\" \\\n f\" is bigger than config.PREV_CUR_POINT_MAX_DIST={str(config.PREV_CUR_POINT_MAX_DIST)})\"\n logger_full.write_and_flush(msg + \"\\n\")\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n prev_bad_quality_pos_obj = cur_pos_obj\n gps_reconnect_ts = distance_wait_start_ts = time.time()\n\n while True:\n if time.time() - distance_wait_start_ts > config.GPS_DIST_WAIT_TIME_MAX:\n msg = f\"Stopping waiting for good prev-cur distance due to timeout, using current point \" \\\n f\"{cur_pos} and starting moving again\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n break\n\n cur_pos_obj = gps.get_last_position_v2()\n cur_pos = cur_pos_obj.as_old_list\n\n # check if it's a new point\n if math.isclose(cur_pos_obj.creation_ts, prev_bad_quality_pos_obj.creation_ts):\n # reconnect gps adapter to ublox if there's no gps points for a while\n if time.time() - gps_reconnect_ts > config.GPS_POINT_TIME_BEFORE_RECONNECT:\n gps.reconnect()\n gps_reconnect_ts = time.time()\n msg = \"Called GPS adapter to reconnect to ublox due to waiting too much for a new \" \\\n \"GPS point (distance filter)\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n continue\n else:\n prev_bad_quality_pos_obj = cur_pos_obj\n\n # check if it's a good quality point or ignore point quality if bad quality stop is not allowed\n if cur_pos[2] != \"4\" and config.ALLOW_GPS_BAD_QUALITY_NTRIP_RESTART:\n # restart ntrip if enough time passed since the last ntrip restart\n navigation.NavigationV3.restart_ntrip_service(logger_full)\n continue\n\n # check if distance became ok\n prev_cur_distance = nav.get_distance(prev_pos, cur_pos)\n if prev_cur_distance <= config.PREV_CUR_POINT_MAX_DIST:\n msg = f\"Starting moving again after GPS points filter by distance as distance become OK \" \\\n f\"({str(prev_cur_distance)})\"\n logger_full.write_and_flush(msg + \"\\n\")\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n break\n\n point_reading_t = time.time()\n\n trajectory_saver.save_point(cur_pos)\n if ui_msg_queue is not None and time.time()-last_send_gps_time >= 1:\n try:\n ui_msg_queue_send_ts = time.time()\n ui_msg_queue.send(json.dumps(\n {\"last_gps\": cur_pos}), timeout=config.QUEUE_WAIT_TIME_MAX)\n last_send_gps_time = time.time()\n\n if config.QUEUE_TRACK_PERFORMANCE:\n ui_msg_queue_send_et = last_send_gps_time - ui_msg_queue_send_ts\n if ui_msg_queue_send_et < ui_msg_queue_perf[\"min_time\"]:\n ui_msg_queue_perf[\"min_time\"] = ui_msg_queue_send_et\n if ui_msg_queue_send_et > ui_msg_queue_perf[\"max_time\"]:\n ui_msg_queue_perf[\"max_time\"] = ui_msg_queue_send_et\n ui_msg_queue_perf[\"total_time\"] += ui_msg_queue_send_et\n ui_msg_queue_perf[\"total_sends\"] += 1\n except posix_ipc.BusyError:\n msg = f\"Current position wasn't sent to ui_msg_queue likely due to sending timeout \" \\\n f\"(max wait time: config.QUEUE_WAIT_TIME_MAX={config.QUEUE_WAIT_TIME_MAX}\"\n logger_full.write(msg + \"\\n\")\n\n if config.QUEUE_TRACK_PERFORMANCE:\n ui_msg_queue_perf[\"timeouts_exceeded\"] += 1\n\n if config.CONTINUOUS_INFORMATION_SENDING and not degraded_navigation_mode:\n notification.set_current_coordinate(cur_pos)\n\n distance = nav.get_distance(cur_pos, coords_from_to[1])\n\n last_corridor_side = current_corridor_side\n perpendicular, current_corridor_side = nav.get_deviation(\n coords_from_to[0], coords_from_to[1], cur_pos)\n\n # 
stop the robot if it has left the field\n if enable_field_leaving_protection:\n for pt_idx in range(len(cur_field)):\n last_point = pt_idx + 1 == len(cur_field)\n\n if last_point:\n deviation, side = nav.get_deviation(cur_field[pt_idx], cur_field[0], cur_pos)\n else:\n deviation, side = nav.get_deviation(cur_field[pt_idx], cur_field[pt_idx + 1], cur_pos)\n\n if side == -1 and deviation > config.LEAVING_PROTECTION_DISTANCE_MAX:\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n msg = f\"Robot is stopped due to leaving the field. Cur pos: '{str(cur_pos)}'; \" \\\n f\"Field comparison vector - P1: '{str(cur_field[pt_idx])}', \" \\\n f\"P2: '{str(cur_field[0] if last_point else cur_field[pt_idx + 1])}'\"\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n notification.set_robot_state(RobotStates.OUT_OF_SERVICE)\n exit()\n\n # check if arrived\n _, side = nav.get_deviation(\n coords_from_to[1], stop_helping_point, cur_pos)\n # if distance <= config.COURSE_DESTINATION_DIFF: # old way\n if side != 1: # TODO: maybe should use both side and distance checking methods at once\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n # msg = \"Arrived (allowed destination distance difference \" + str(config.COURSE_DESTINATION_DIFF) + \" mm)\"\n # TODO: service will reload script even if it done his work?\n msg = \"Arrived to \" + str(coords_from_to[1])\n # print(msg)\n logger_full.write(msg + \"\\n\")\n\n # put the wheel straight\n if wheels_straight:\n response = smoothie.custom_move_to(A_F=config.A_F_MAX, A=0)\n if response != smoothie.RESPONSE_OK: # TODO: what if response is not ok?\n msg = \"Couldn't turn wheels to center (0), smoothie response:\\n\" + \\\n response\n print(msg)\n logger_full.write(msg + \"\\n\")\n else:\n # save wheels angle\n with open(config.LAST_ANGLE_WHEELS_FILE, \"w+\") as wheels_angle_file:\n wheels_angle_file.write(\n str(smoothie.get_adapter_current_coordinates()[\"A\"]))\n break\n\n # TODO check for bug: arrival check applies single speed for all path (while multiple speeds are applied)\n # check if can arrived\n if vesc_engine.get_current_rpm(vesc_engine.PROPULSION_KEY) / config.MULTIPLIER_SI_SPEED_TO_RPM * \\\n config.MANEUVERS_FREQUENCY > nav.get_distance(cur_pos, coords_from_to[1]):\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n msg = \"Will have arrived before the next point to \" + \\\n str(coords_from_to[1])\n # print(msg)\n logger_full.write(msg + \"\\n\")\n\n break\n\n # reduce speed if near the target point\n if config.USE_SPEED_LIMIT:\n distance_from_start = nav.get_distance(coords_from_to[0], cur_pos)\n close_to_end = distance < config.DECREASE_SPEED_TRESHOLD or distance_from_start < config.DECREASE_SPEED_TRESHOLD\n\n msg = \"Distance to B: \" + str(distance)\n # print(msg)\n logger_full.write(msg + \"\\n\")\n\n msg = \"Prev: \" + str(prev_pos) + \" Cur: \" + str(cur_pos) + \" A: \" + str(coords_from_to[0]) \\\n + \" B: \" + str(coords_from_to[1])\n # print(msg)\n logger_full.write(msg + \"\\n\")\n\n # pass by cur points which are very close to prev point to prevent angle errors when robot is staying\n # (too close points in the same position can produce false huge angles)\n\n 
navigation_prediction.run_prediction(coords_from_to, cur_pos)\n\n # raw_angle_cruise = nav.get_angle(coords_from_to[0], cur_pos, cur_pos, coords_from_to[1])\n # raw_angle_legacy = nav.get_angle(prev_pos, cur_pos, cur_pos, coords_from_to[1])\n raw_angle_centroid = nav.get_angle(\n prev_pos, cur_pos, coords_from_to[0], coords_from_to[1])\n raw_angle_cruise = - current_corridor_side * math.log(1+perpendicular)\n\n if nav.get_distance(coords_from_to[0], coords_from_to[1]) < config.CORNER_THRESHOLD and nav.get_distance(coords_from_to[1], future_points[0][0]) < config.CORNER_THRESHOLD:\n # if abs(raw_angle_legacy)>config.LOST_THRESHOLD:\n centroid_factor = config.CENTROID_FACTOR_LOST\n cruise_factor = 1/centroid_factor\n else:\n centroid_factor = config.CENTROID_FACTOR_ORIENTED\n cruise_factor = 1\n\n raw_angle = raw_angle_centroid*centroid_factor + raw_angle_cruise*cruise_factor\n\n # raw_angle = butter_lowpass_filter(raw_angle, 0.5, 4, 6)\n\n if config.LEARN_GO_STRAIGHT:\n if config.MIN_PERPENDICULAR_GO_STRAIGHT >= perpendicular:\n learn_go_straight_index += 1\n learn_go_straight_history.append(raw_angle)\n if len(learn_go_straight_history) >= config.VALUES_LEARN_GO_STRAIGHT:\n learn_go_straight = sum(\n learn_go_straight_history)/len(learn_go_straight_history)\n msg = f\"Average angle applied to the wheel for the robot to have found : {learn_go_straight}.\"\n logger_full.write_and_flush(msg + \"\\n\")\n # TODO opening and closing file 4 times per second\n with open(config.LEARN_GO_STRAIGHT_FILE, \"w+\") as learn_go_straight_file:\n learn_go_straight_file.write(str(learn_go_straight))\n else:\n learn_go_straight_index = 0\n\n # NAVIGATION STATE MACHINE\n if prev_cur_distance < config.PREV_CUR_POINT_MIN_DIST:\n raw_angle = last_correct_raw_angle\n # print(\"The distance covered is low\")\n point_status = \"skipped\"\n\n # register the last position where the robot almost stop\n # in order to disable the deviation servo for a config.POURSUIT_LIMIT length and then resume in cruise\n last_skipped_point = cur_pos\n else:\n last_correct_raw_angle = raw_angle\n point_status = \"correct\"\n\n almost_start = nav.get_distance(last_skipped_point, cur_pos)\n\n # sum(e)\n if len(raw_angles_history) >= config.WINDOW:\n raw_angles_history.pop(0)\n raw_angles_history.append(raw_angle)\n # print(\"len(raw_angles_history):\",len(raw_angles_history))\n sum_angles = sum(raw_angles_history)\n if sum_angles > config.SUM_ANGLES_HISTORY_MAX:\n msg = \"Sum angles \" + str(sum_angles) + \" is bigger than max allowed value \" + \\\n str(config.SUM_ANGLES_HISTORY_MAX) + \", setting to \" + \\\n str(config.SUM_ANGLES_HISTORY_MAX)\n # print(msg)\n logger_full.write(msg + \"\\n\")\n # Get Ready to go down as soon as the angle get negatif\n raw_angles_history[len(raw_angles_history) -\n 1] -= sum_angles - config.SUM_ANGLES_HISTORY_MAX\n sum_angles = config.SUM_ANGLES_HISTORY_MAX\n elif sum_angles < -config.SUM_ANGLES_HISTORY_MAX:\n msg = \"Sum angles \" + str(sum_angles) + \" is less than min allowed value \" + \\\n str(-config.SUM_ANGLES_HISTORY_MAX) + \", setting to \" + \\\n str(-config.SUM_ANGLES_HISTORY_MAX)\n # print(msg)\n logger_full.write(msg + \"\\n\")\n # get Ready to go up as soon as the angle get positive:\n raw_angles_history[len(raw_angles_history)-1] += - \\\n sum_angles - config.SUM_ANGLES_HISTORY_MAX\n sum_angles = -config.SUM_ANGLES_HISTORY_MAX\n\n # KP = 0.2*0,55\n # KI = 0.0092*0,91\n\n KP = getSpeedDependentConfigParam(\n config.KP, SI_speed, \"KP\", logger_full)\n KI = getSpeedDependentConfigParam(\n 
config.KI, SI_speed, \"KI\", logger_full)\n\n angle_kp_ki = raw_angle * KP + sum_angles * KI\n\n # smoothie -Value == left, Value == right\n target_angle_sm = angle_kp_ki * -config.A_ONE_DEGREE_IN_SMOOTHIE\n # target_angle_sm = 0 #Debug COVID_PLACE\n ad_wheels_pos = smoothie.get_adapter_current_coordinates()[\"A\"]\n # sm_wheels_pos = smoothie.get_smoothie_current_coordinates()[\"A\"]\n sm_wheels_pos = \"off\"\n\n # compute order angle (smoothie can't turn for huge values immediately also as cancel movement,\n # so we need to do nav. actions in steps)\n order_angle_sm = target_angle_sm - ad_wheels_pos\n\n # check for out of update frequency and smoothie execution speed range (for nav wheels)\n if order_angle_sm > config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND * \\\n config.A_ONE_DEGREE_IN_SMOOTHIE:\n msg = \"Order angle changed from \" + str(order_angle_sm) + \" to \" + str(\n config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND +\n config.A_ONE_DEGREE_IN_SMOOTHIE) + \" due to exceeding degrees per tick allowed range.\"\n # print(msg)\n logger_full.write(msg + \"\\n\")\n order_angle_sm = config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND * \\\n config.A_ONE_DEGREE_IN_SMOOTHIE\n elif order_angle_sm < -(config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND *\n config.A_ONE_DEGREE_IN_SMOOTHIE):\n msg = \"Order angle changed from \" + str(order_angle_sm) + \" to \" + str(-(\n config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND *\n config.A_ONE_DEGREE_IN_SMOOTHIE)) + \" due to exceeding degrees per tick allowed range.\"\n # print(msg)\n logger_full.write(msg + \"\\n\")\n order_angle_sm = -(config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND *\n config.A_ONE_DEGREE_IN_SMOOTHIE)\n\n # convert to global smoothie coordinates\n order_angle_sm += ad_wheels_pos\n\n # checking for out of smoothie supported range\n if order_angle_sm > config.A_MAX:\n msg = \"Global order angle changed from \" + str(order_angle_sm) + \" to config.A_MAX = \" + \\\n str(config.A_MAX) + \\\n \" due to exceeding smoothie allowed values range.\"\n # print(msg)\n logger_full.write(msg + \"\\n\")\n order_angle_sm = config.A_MAX\n elif order_angle_sm < config.A_MIN:\n msg = \"Global order angle changed from \" + str(order_angle_sm) + \" to config.A_MIN = \" + \\\n str(config.A_MIN) + \\\n \" due to exceeding smoothie allowed values range.\"\n # print(msg)\n logger_full.write(msg + \"\\n\")\n order_angle_sm = config.A_MIN\n\n # cork x movement during periphery scans control\n if config.ALLOW_X_MOVEMENT_DURING_SCANS:\n if x_scan_idx_increasing:\n x_scan_cur_idx += 1\n if x_scan_cur_idx >= len(config.X_MOVEMENT_CAMERA_POSITIONS):\n x_scan_idx_increasing = False\n x_scan_cur_idx -= 2\n else:\n x_scan_cur_idx -= 1\n if x_scan_cur_idx < 0:\n x_scan_idx_increasing = True\n x_scan_cur_idx += 2\n # TODO do we check SI_speed earlier and do proper calculations and angle validations if here we'll get here a negative order angle instead of positive?\n response = smoothie.custom_move_to(\n A_F=config.A_F_MAX,\n A=order_angle_sm if SI_speed >= 0 else -order_angle_sm,\n X_F=config.X_MOVEMENT_CAMERA_X_F[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else None,\n X=config.X_MOVEMENT_CAMERA_POSITIONS[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else None\n )\n\n if response != smoothie.RESPONSE_OK:\n msg = \"Couldn't turn wheels! 
Smoothie response:\\n\" + response\n print(msg)\n logger_full.write(msg + \"\\n\")\n else:\n # TODO opening and closing file too often (likely 4 times per second)\n # save wheels angle\n with open(config.LAST_ANGLE_WHEELS_FILE, \"w+\") as wheels_angle_file:\n wheels_angle_file.write(\n str(smoothie.get_adapter_current_coordinates()[\"A\"]))\n\n raw_angle = round(raw_angle, 2)\n angle_kp_ki = round(angle_kp_ki, 2)\n order_angle_sm = round(order_angle_sm, 2)\n sum_angles = round(sum_angles, 2)\n distance = round(distance, 2)\n ad_wheels_pos = round(ad_wheels_pos, 2)\n perpendicular = round(perpendicular, 2)\n # sm_wheels_pos = round(sm_wheels_pos, 2)\n gps_quality = cur_pos[2]\n corridor = \"\"\n if current_corridor_side == -1:\n corridor = \"left\"\n elif current_corridor_side == 1:\n corridor = \"right\"\n\n raw_angle_cruise = round(raw_angle_cruise, 2)\n\n msg = str(gps_quality).ljust(5) + \\\n str(raw_angle).ljust(8) + \\\n str(angle_kp_ki).ljust(8) + \\\n str(order_angle_sm).ljust(8) + \\\n str(sum_angles).ljust(8) + \\\n str(distance).ljust(13) + \\\n str(ad_wheels_pos).ljust(8) + \\\n str(sm_wheels_pos).ljust(9) + \\\n point_status.ljust(12) + \\\n str(perpendicular).ljust(10) + \\\n corridor.ljust(9) + \\\n str(centroid_factor).ljust(16) + \\\n str(cruise_factor).ljust(14)\n print(msg)\n logger_full.write(msg + \"\\n\")\n\n # TODO vesc sensors are being asked 4 times per second\n # send voltage and track bumper state\n vesc_data = vesc_engine.get_sensors_data(\n report_field_names, vesc_engine.PROPULSION_KEY)\n if vesc_data is not None and \"input_voltage\" in vesc_data:\n if bumper_is_pressed is None:\n bumper_is_pressed = not vesc_data[\"input_voltage\"] > config.VESC_BUMBER_UNTRIGGER_VOLTAGE\n if bumper_is_pressed:\n msg = f\"Bumper is pressed initially before starting moving to point. \" \\\n f\"({vesc_data['input_voltage']}V)\"\n logger_full.write(msg + \"\\n\")\n elif not bumper_is_pressed and vesc_data[\"input_voltage\"] < config.VESC_BUMBER_TRIGGER_VOLTAGE:\n bumper_is_pressed = True\n msg = f\"Bumper was pressed. ({vesc_data['input_voltage']}V)\"\n logger_full.write(msg + \"\\n\")\n elif bumper_is_pressed and vesc_data[\"input_voltage\"] > config.VESC_BUMBER_UNTRIGGER_VOLTAGE:\n bumper_is_pressed = False\n msg = f\"Bumper was unpressed. 
({vesc_data['input_voltage']}V)\"\n logger_full.write(msg + \"\\n\")\n\n if config.CONTINUOUS_INFORMATION_SENDING:\n notification.set_input_voltage(vesc_data[\"input_voltage\"])\n\n prev_pos_obj = cur_pos_obj\n prev_pos = prev_pos_obj.as_old_list\n\n msg = \"Nav calc time: \" + str(time.time() - nav_start_t)\n logger_full.write(msg + \"\\n\\n\")\n\n if config.QUEUE_TRACK_PERFORMANCE:\n ui_msg_queue_perf[\"avg_time\"] = ui_msg_queue_perf[\"total_time\"] / \\\n ui_msg_queue_perf[\"total_sends\"]\n msg = f\"Position sending performance report: {ui_msg_queue_perf}\"\n if config.VERBOSE:\n print(msg)\n logger_full.write(msg + \"\\n\")", "def get_goal_ee_pose(self):\n #self.target_endpoint = #magic tf call that I can add ie the pose of the palm from camera aruco detection\n while True:\n try:\n translation, rotation = self.listener.lookupTransform('world_frame', 'palm_frame_camera', rospy.Time()) # ee_frame_camera_flipped\n break # once the transform is obtained move on\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n continue # if it fails try again\n point = [translation[0], translation[1], translation[2]]\n self.target_endpoint = np.array(point)\n # rospy.logerr(self.target_endpoint)", "def get_observations(self):\n joint_states = self.joints_state\n self.force = self.wrench_stamped.wrench.force\n self.torque = self.wrench_stamped.wrench.torque\n self.static_taxel = self.tactile_static.taxels\n# dynamic_taxel= tactile_dynamic\n\n# print(\"[force]\", self.force.x, self.force.y, self.force.z)\n# print(\"[torque]\", self.torque.x, self.torque.y, self.torque.z)\n shp_joint_ang = joint_states.position[0]\n shl_joint_ang = joint_states.position[1]\n elb_joint_ang = joint_states.position[2]\n wr1_joint_ang = joint_states.position[3]\n wr2_joint_ang = joint_states.position[4]\n wr3_joint_ang = joint_states.position[5]\n\n shp_joint_vel = joint_states.velocity[0]\n shl_joint_vel = joint_states.velocity[1]\n elb_joint_vel = joint_states.velocity[2]\n wr1_joint_vel = joint_states.velocity[3]\n wr2_joint_vel = joint_states.velocity[4]\n wr3_joint_vel = joint_states.velocity[5]\n\n q = [shp_joint_ang, shl_joint_ang, elb_joint_ang, wr1_joint_ang, wr2_joint_ang, wr3_joint_ang]\n# print(\"q(observation):\", q)\n eef_x, eef_y, eef_z = self.get_xyz(q)\n self.end_effector = self.get_xyz(q)\n eef_x_ini, eef_y_ini, eef_z_ini = self.get_xyz(self.init_joint_pose2) \n\n delta_image_r, delta_image_l = self.get_image()\n self.cnn_image_r = agent.update_cnn(delta_image_r)\n self.cnn_image_l = agent.update_cnn(delta_image_l)\n self.cnn_image_r_list = self.cnn_image_r.tolist()\n self.cnn_image_l_list = self.cnn_image_l.tolist()\n print(\"r_list\", self.cnn_image_r_list)\n print(\"l_list\", self.cnn_image_l_list)\n\n observation = []\n# rospy.logdebug(\"List of Observations==>\"+str(self.observations))\n for obs_name in self.observations:\n if obs_name == \"shp_joint_ang\":\n observation.append((shp_joint_ang - self.init_joint_pose2[0]) * self.joint_n)\n elif obs_name == \"shl_joint_ang\":\n observation.append((shl_joint_ang - self.init_joint_pose2[1]) * self.joint_n)\n elif obs_name == \"elb_joint_ang\":\n observation.append((elb_joint_ang - self.init_joint_pose2[2]) * self.joint_n)\n elif obs_name == \"wr1_joint_ang\":\n observation.append((wr1_joint_ang - self.init_joint_pose2[3]) * self.joint_n)\n elif obs_name == \"wr2_joint_ang\":\n observation.append((wr2_joint_ang - self.init_joint_pose2[4]) * self.joint_n)\n elif obs_name == \"wr3_joint_ang\":\n 
observation.append((wr3_joint_ang - self.init_joint_pose2[5]) * self.joint_n)\n elif obs_name == \"shp_joint_vel\":\n observation.append(shp_joint_vel)\n elif obs_name == \"shl_joint_vel\":\n observation.append(shl_joint_vel)\n elif obs_name == \"elb_joint_vel\":\n observation.append(elb_joint_vel)\n elif obs_name == \"wr1_joint_vel\":\n observation.append(wr1_joint_vel)\n elif obs_name == \"wr2_joint_vel\":\n observation.append(wr2_joint_vel)\n elif obs_name == \"wr3_joint_vel\":\n observation.append(wr3_joint_vel)\n elif obs_name == \"eef_x\":\n observation.append((eef_x - eef_x_ini) * self.eef_n)\n elif obs_name == \"eef_y\":\n observation.append((eef_y - eef_y_ini) * self.eef_n)\n elif obs_name == \"eef_z\":\n observation.append((eef_z - eef_z_ini) * self.eef_n)\n elif obs_name == \"force_x\":\n observation.append((self.force.x - self.force_ini.x) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_y\":\n observation.append((self.force.y - self.force_ini.y) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_z\":\n observation.append((self.force.z - self.force_ini.z) / self.force_limit1 * self.force_n)\n elif obs_name == \"torque_x\":\n observation.append((self.torque.x - self.torque_ini.x) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_y\":\n observation.append((self.torque.y - self.torque_ini.y) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_z\":\n observation.append((self.torque.z - self.torque_ini.z) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"image_cnn\":\n for x in range(0, 10):\n observation.append(self.cnn_image_r_list[0][x])\n# print(\"r_list\", self.cnn_image_r_list[0][x])\n for x in range(0, 10):\n observation.append(self.cnn_image_l_list[0][x])\n# print(\"l_list\", self.cnn_image_l_list[0][x])\n elif obs_name == \"static_taxel\":\n for x in range(0, 28):\n observation.append((self.static_taxel[0].values[x] - self.static_taxel_ini[0].values[x]) * self.taxel_n)\n for x in range(0, 28):\n observation.append((self.static_taxel[1].values[x] - self.static_taxel_ini[1].values[x]) * self.taxel_n)\n# elif obs_name == \"dynamic_taxel\":\n# observation.append(dynamic_taxel[0].values) * self.taxel_n\n# observation.append(dynamic_taxel[1].values) * self.taxel_n\n else:\n raise NameError('Observation Asked does not exist=='+str(obs_name))\n\n print(\"observation\", list(map(round, observation, [3]*len(observation))))\n# print(\"observation\", observation)\n\n return observation", "def predict_poses(self, inputs, features):\n outputs = dict()\n # Compute the pose to each source frame via a separate forward pass through the pose network.\n # select what features the pose network takes as input\n if self.args.pose_model_type == \"shared\":\n pose_feats = {frame_id: features[frame_id] for frame_id in self.args.frame_idxs}\n else:\n pose_feats = {frame_id: inputs[(\"color_aug\", frame_id, 0)] for frame_id in self.args.frame_idxs}\n\n for frame_id in self.args.frame_idxs[1:]:\n # To maintain ordering we always pass frames in temporal order\n if frame_id == -1:\n pose_inputs = [pose_feats[frame_id], pose_feats[0]]\n else:\n pose_inputs = [pose_feats[0], pose_feats[frame_id]]\n\n if self.args.pose_model_type == \"separate\":\n pose_inputs = [self.models[\"pose_encoder\"](torch.cat(pose_inputs, 1))]\n\n axisangle, translation = self.models[\"pose\"](pose_inputs)\n\n # Normalize the translation vec and multiply by the displacement magnitude obtained from speed\n # of the vehicle to scale it to the real world translation\n 
translation_magnitude = translation[:, 0].squeeze(1).norm(p=\"fro\",\n dim=1).unsqueeze(1).unsqueeze(2)\n translation_norm = translation[:, 0] / translation_magnitude\n translation_norm *= inputs[(\"displacement_magnitude\", frame_id)].unsqueeze(1).unsqueeze(2)\n translation = translation_norm\n\n outputs[(\"axisangle\", 0, frame_id)] = axisangle\n outputs[(\"translation\", 0, frame_id)] = translation\n # Invert the matrix if the frame id is negative\n outputs[(\"cam_T_cam\", 0, frame_id)] = pose_vec2mat(axisangle[:, 0],\n translation,\n invert=(frame_id < 0),\n rotation_mode=self.args.rotation_mode)\n return outputs", "def _add_trajectory(self, prev_time_step, action, new_time_step):\n\n traj = tf_agents.trajectories.trajectory.from_transition(\n prev_time_step, action, new_time_step)\n\n self.replay_buffer.add_batch(traj)\n self.replay_buffer_position += 1\n\n if self.replay_buffer_position == REPLAY_BUFFER_SIZE + 1:\n if not self.keep_models_fixed:\n self.agent.train(self.replay_buffer.gather_all())\n self.replay_buffer_position = 0\n self.replay_buffer.clear()", "def run_frame(self, image):\n self.frame_idx += 1\n # run main pipeline\n t0 = datetime.now()\n disp = self.main_pipeline(image)\n t1 = datetime.now()\n logging.info('main pipeline: {}'.format(get_tdiff(t0, t1)))\n \n # prepare image sequence of 3 for trajectory pipeline\n t0 = datetime.now()\n self.image_seq.append(image)\n if len(self.image_seq) > 3:\n del self.image_seq[0]\n t1 = datetime.now()\n logging.info('image stack: {}'.format(get_tdiff(t0, t1)))\n\n # run trajectory pipeline\n t0 = datetime.now()\n if len(self.image_seq) >= 3:\n self.egomo_trmat = self.traj_pipeline(prev_trmat=self.egomo_trmat)\n t1 = datetime.now()\n logging.info('traj pipeline: {}'.format(get_tdiff(t0, t1)))\n return self.frame_idx, disp, self.egomo_trmat, self.t_list", "def prepare_data(cameras, frame_points_3d, frame_points_2d, keyframe_idx):\n camera_params = np.empty((0, 9))\n for c in cameras:\n R, _ = cv2.Rodrigues(c.R_mat)\n camera = build_camera(R, c.t)\n camera_params = np.append(camera_params, [camera], axis=0)\n\n camera_indices = []\n point_indices = []\n points_2d = np.empty((0, 2))\n points_3d = np.empty((0, 3))\n\n camera_id = 0\n pt_id_counter = 0\n for k, pts_2d in enumerate(frame_points_2d):\n if k > 0:\n halfway_idx = keyframe_idx[k] - keyframe_idx[k - 1] - 1\n points_2d = np.vstack((points_2d, frame_points_2d[k-1][halfway_idx]))\n points_3d = np.vstack((points_3d, frame_points_3d[k-1][halfway_idx]))\n camera_indices += [camera_id for _ in range(len(frame_points_2d[k-1][halfway_idx]))]\n point_indices += [i for i in range(pt_id_counter, pt_id_counter + len(frame_points_2d[k-1][halfway_idx]))]\n pt_id_counter = pt_id_counter + len(frame_points_2d[k-1][halfway_idx])\n\n if k > 1:\n end_idx = keyframe_idx[k + 1] - keyframe_idx[k - 1] - 3\n points_2d = np.vstack((points_2d, frame_points_2d[k-2][end_idx]))\n points_3d = np.vstack((points_3d, frame_points_3d[k-2][end_idx]))\n camera_indices += [camera_id for _ in range(len(frame_points_2d[k-2][end_idx]))]\n point_indices += [i for i in range(pt_id_counter, pt_id_counter + len(frame_points_2d[k-2][end_idx]))]\n pt_id_counter = pt_id_counter + len(frame_points_2d[k-2][end_idx])\n\n points_2d = np.vstack((points_2d, frame_points_2d[k][0]))\n points_3d = np.vstack((points_3d, frame_points_3d[k][0]))\n camera_indices += [camera_id for _ in range(pts_2d.shape[1])]\n point_indices += [i for i in range(pt_id_counter, pt_id_counter + pts_2d.shape[1])]\n\n camera_id += 1\n pt_id_counter = 
pt_id_counter + pts_2d.shape[1]\n\n return camera_params, np.asarray(camera_indices), np.asarray(point_indices), points_3d, points_2d", "def _feed_forward(self, video, img_metas):\n fmaps = []\n for i, modal in enumerate(img_metas['modality']):\n fmaps.append(getattr(self.backbone, modal)(video[i]))\n if hasattr(self, 'neck'):\n fmaps = [self.neck(fmap) for fmap in fmaps]\n if hasattr(self, 'cls_head'):\n logits = self.cls_head(fmaps, img_metas)\n else:\n return None, fmaps\n return logits, fmaps", "def get_things1(kp_3d, kp_2d, des, comp_list, H, map_3d, map_2d, map_des, map_cam, map_view, my_max):\n # Initializing the arrays\n points_3d = []\n points_2d = []\n camera_ind = []\n points_ind = []\n cam_params = []\n\n dst_3d = kp_3d\n dst_2d = kp_2d\n src_3d = map_3d\n src_2d = map_2d\n src_cam = map_cam\n low_bound = []\n up_bound = []\n my_min = 0\n\n # Updating the Camera parameters in map and setting the bounds for the update \n for i in range(my_min,my_max+1):\n cam_param = [map_view[i,0], map_view[i,1], map_view[i,2], map_view[i,3], map_view[i,4], map_view[i,5], f,0,0]\n cam_params.append(cam_param)\n\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n low_bound.append(f-1)\n low_bound.append(-1)\n low_bound.append(-1)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n up_bound.append(f)\n up_bound.append(0)\n up_bound.append(0)\n \n # Updating the Camera parameters for frame and setting the bounds for the update\n r = (R.from_matrix((H[0:3, 0:3]))).as_rotvec()\n t = H[:,3]\n cam_param = [r[0], r[1], r[2], t[0], t[1], t[2], f, 0, 0]\n cam_params.append(cam_param)\n \n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n low_bound.append(f-1)\n low_bound.append(-1)\n low_bound.append(-1)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n up_bound.append(f)\n up_bound.append(0)\n up_bound.append(0)\n\n new_cam = len(cam_params)-1\n cam_params = np.array(cam_params).reshape(-1,9)\n count = 0\n \n # listing variables to iterate \n l1 = []\n l2 = []\n count = 0\n \n for m in comp_list:\n count+=1\n l1.append(m.queryIdx)\n l2.append(m.trainIdx)\n\n l1 = np.array(l1).reshape(1,-1)\n l2 = np.array(l2).reshape(1,-1)\n l = np.vstack((l1,l2))\n l_fin = l[:,l[1, :].argsort()]\n j = 0\n count = len(points_3d)\n prev = -1\n final_l1 = []\n final_l2 = []\n final_des = []\n\n # Iterating through the list made and making sure no duplicates\n while(j<(len(l_fin[0]))):\n i1 = l_fin[0,j]\n i2 = l_fin[1,j]\n if(i2!=prev):\n # Map points insertion\n \n check = 0\n for ii in range(len(src_2d[i1])):\n m_2d = src_2d[i1][ii]\n check = 1\n ind = int(src_cam[i1][ii])\n points_2d.append([int((m_2d[0]%(2*cx))-cx), int((m_2d[1]%(2*cy))-cy),0])\n\n points_ind.append(count)\n camera_ind.append(ind)\n final_l1.append(i1)\n final_l2.append(0)\n \n # Taking Mean Desciptor if needed un comment 2 lines below\n # x = ((map_des[i1]*len(src_2d[i1]))+des[i2])/(len(src_2d[i1])+1)\n # map_des[i1] = x\n \n if(check==1):\n # Frame points insersion\n points_2d.append([int((dst_2d[i2,0])-cx), int((dst_2d[i2,1])-cy), 0])\n points_ind.append(count)\n camera_ind.append(new_cam)\n final_l1.append(i2)\n final_l2.append(1)\n wld_pt = 
src_3d[i1]\n points_3d.append([wld_pt[0], wld_pt[1], wld_pt[2]])\n prev = i2\n count = len(points_3d)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n src_2d[i1].append([int((dst_2d[i2,0])), int((dst_2d[i2,1]))])\n j+=1\n \n # Final Output\n cam_params = np.array(cam_params).reshape(-1,9)\n points_3d = np.array(points_3d)\n points_2d = np.array(points_2d)\n camera_ind = np.array(camera_ind).reshape(len(camera_ind))\n points_ind = np.array(points_ind).reshape(len(points_ind))\n final_l1 = np.array(final_l1)\n final_l2 = np.array(final_l2)\n return cam_params, points_3d, points_2d, camera_ind, points_ind, final_l1, final_l2, low_bound, up_bound, map_des, src_2d", "def main():\n # Placing imports here so it will be imported only if user want to test algorithm, not when importing\n # Class DepthCameraServer\n\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n import sensors_classes as sensors\n from images_processing_class import ImagesProcessing\n import struct\n import time\n\n # Starting Thread which receives data from VideoCamera, port od thread's socket must be the same as the port at\n # which data from VideoCamera is redirected, to be sure check where VideoCamera data stream is send in script env.py\n depth_camera_server = DepthCameraServer('localhost', 60012)\n depth_camera_server.run()\n\n pose_server = sensors.Pose_server('localhost', 60007)\n pose_server.run()\n\n # Waiting 1 sec to be sure than depth_camera_server has received minimum 1 image, because program will crash if\n # depth_camera_server doesn't have time to receive an image\n time.sleep(1)\n\n points = depth_camera_server.get_points()\n\n lista_punktow = []\n x = []\n y = []\n z = []\n\n data_pose_dict = pose_server.get_all()\n pose_x = data_pose_dict['x']\n pose_y = data_pose_dict['y']\n pose_z = data_pose_dict['z']\n\n yawp = data_pose_dict['yaw']\n pitchp = data_pose_dict['pitch']\n rollp = data_pose_dict['roll']\n\n # Each 3D point is a set of float(x,y,z). 
Each point has a size of 12 bytes because\n # 3*sizeof(float) = 12 bytes, that's why we are dividing data into parts with size of 12 and then\n # converting this data to tuple with 3 float (xyz).\n\n #\n # Processing cloud of points to seperate x, y and z was copied from dcam_old.py\n #\n\n for i in range(0, len(points) - 12, 12):\n xyz = struct.unpack('fff', points[i:i + 12])\n\n # rotation is included\n x1p, y1p, z1p = rotation(xyz[2], xyz[0], xyz[1], yawp, pitchp, rollp)\n\n # data from pose is included\n xp = round(x1p + pose_x, 1)\n yp = round(y1p + pose_y, 1)\n zp = round(z1p + pose_z, 1)\n temp = [xp, yp, zp]\n lista_punktow.append(temp)\n\n # Choosing only these points which have minimum 0.45 meters at z-axis, but why???\n for i in lista_punktow:\n x.append(i[0])\n y.append(i[1])\n z.append(i[2])\n\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(x, y, z, cmap='viridis', linewidth=0.5)\n ax.scatter(x[0], y[0], z[0], c='red')\n ax.scatter(x[1], y[1], z[1], c='yellow')\n ax.scatter(x[2], y[2], z[2], c='black')\n ax.scatter(pose_x, pose_y, pose_z, c='green')\n plt.show()", "def run_tracker(p):\n # load model\n net = torch.load(os.path.join(p.net_base_path, p.net))\n net = net.to(device)\n\n # evaluation mode\n net.eval()\n\n # load sequence\n img_list, target_position, target_size = load_sequence(p.seq_base_path, p.video)\n\n # first frame\n img_uint8 = cv2.imread(img_list[0])\n img_uint8 = cv2.cvtColor(img_uint8, cv2.COLOR_BGR2RGB)\n img_double = np.double(img_uint8) # uint8 to float\n\n # compute avg for padding\n avg_chans = np.mean(img_double, axis=(0, 1))\n\n wc_z = target_size[1] + p.context_amount * sum(target_size)\n hc_z = target_size[0] + p.context_amount * sum(target_size)\n s_z = np.sqrt(wc_z * hc_z)\n scale_z = p.examplar_size / s_z\n\n # crop examplar z in the first frame\n z_crop = get_subwindow_tracking(img_double, target_position, p.examplar_size, round(s_z), avg_chans)\n\n z_crop = np.uint8(z_crop) # you need to convert it to uint8\n # convert image to tensor\n z_crop_tensor = 255.0 * F.to_tensor(z_crop).unsqueeze(0)\n\n d_search = (p.instance_size - p.examplar_size) / 2\n pad = d_search / scale_z\n s_x = s_z + 2 * pad\n # arbitrary scale saturation\n min_s_x = p.scale_min * s_x\n max_s_x = p.scale_max * s_x\n\n # generate cosine window\n if p.windowing == 'cosine':\n window = np.outer(np.hanning(p.score_size * p.response_UP), np.hanning(p.score_size * p.response_UP))\n elif p.windowing == 'uniform':\n window = np.ones((p.score_size * p.response_UP, p.score_size * p.response_UP))\n window = window / sum(sum(window))\n\n # pyramid scale search\n scales = p.scale_step**np.linspace(-np.ceil(p.num_scale/2), np.ceil(p.num_scale/2), p.num_scale)\n\n # extract feature for examplar z\n z_features = net.feat_extraction(Variable(z_crop_tensor).to(device))\n z_features = z_features.repeat(p.num_scale, 1, 1, 1)\n\n # do tracking\n bboxes = np.zeros((len(img_list), 4), dtype=np.double) # save tracking result\n start_time = datetime.datetime.now()\n for i in range(0, len(img_list)):\n if i > 0:\n # do detection\n # currently, we only consider RGB images for tracking\n img_uint8 = cv2.imread(img_list[i])\n img_uint8 = cv2.cvtColor(img_uint8, cv2.COLOR_BGR2RGB)\n img_double = np.double(img_uint8) # uint8 to float\n\n scaled_instance = s_x * scales\n scaled_target = np.zeros((2, scales.size), dtype = np.double)\n scaled_target[0, :] = target_size[0] * scales\n scaled_target[1, :] = target_size[1] * scales\n\n # extract scaled crops for search region x at previous target 
position\n x_crops = make_scale_pyramid(img_double, target_position, scaled_instance, p.instance_size, avg_chans, p)\n\n # get features of search regions\n x_crops_tensor = torch.FloatTensor(x_crops.shape[3], x_crops.shape[2], x_crops.shape[1], x_crops.shape[0])\n # response_map = SiameseNet.get_response_map(z_features, x_crops)\n for k in range(x_crops.shape[3]):\n tmp_x_crop = x_crops[:, :, :, k]\n tmp_x_crop = np.uint8(tmp_x_crop)\n # numpy array to tensor\n x_crops_tensor[k, :, :, :] = 255.0 * F.to_tensor(tmp_x_crop).unsqueeze(0)\n\n # get features of search regions\n x_features = net.feat_extraction(Variable(x_crops_tensor).to(device))\n\n # evaluate the offline-trained network for exemplar x features\n target_position, new_scale = tracker_eval(net, round(s_x), z_features, x_features, target_position, window, p)\n\n # scale damping and saturation\n s_x = max(min_s_x, min(max_s_x, (1 - p.scale_LR) * s_x + p.scale_LR * scaled_instance[int(new_scale)]))\n target_size = (1 - p.scale_LR) * target_size + p.scale_LR * np.array([scaled_target[0, int(new_scale)], scaled_target[1, int(new_scale)]])\n\n rect_position = np.array([target_position[1]-target_size[1]/2, target_position[0]-target_size[0]/2, target_size[1], target_size[0]])\n\n if p.visualization:\n visualize_tracking_result(img_uint8, rect_position, 1)\n\n # output bbox in the original frame coordinates\n o_target_position = target_position\n o_target_size = target_size\n bboxes[i,:] = np.array([o_target_position[1]-o_target_size[1]/2, o_target_position[0]-o_target_size[0]/2, o_target_size[1], o_target_size[0]])\n\n end_time = datetime.datetime.now()\n fps = len(img_list)/max(1.0, (end_time-start_time).seconds)\n\n return bboxes, fps", "def process(self, step_guess_orientation=True, step_advanced_alignement=True,\n step_gen_worldfiles=True, step_load_worldfiles=True,\n step_gen_vrts=True, step_load_vrts=True,\n step_load_debug=True ):\n\n QgsMessageLog.logMessage(\"1/ Instantiating all images...\", \"QuickDroneMap\", 0)\n for root, dirs, files in os.walk(self.folder):\n for file in files:\n if file.endswith(\".jpg\") or file.endswith(\".JPG\"):\n image_path = os.path.join(root, file)\n image = Image(self, image_path)\n self.images.append(image)\n self.images = self.images[70:90]\n # for i in [301,300,329]: # 3 images, transform fails on all of them\n # for i in [397,398,364]: # 3 images, transform fails on one of them\n # for i in [377,380,381]: # 3 images, transform works on all of them\n # path = \"C:\\\\Users\\\\Olivier\\\\Dropbox\\\\Affaires\\\\SPC\\\\Sources\\\\quickdronemap\\\\test\\\\data\\\\DJI_{0:04d}.JPG\".format(i)\n # self.images.append(Image(self, path))\n\n QgsMessageLog.logMessage(\"2/ Assigning ids\", \"QuickDroneMap\", 0)\n for i, image in enumerate(self.images):\n image.id = i\n\n\n QgsMessageLog.logMessage(\"2/ Loading image attributes and parsing exif tags...\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.set_attributes()\n\n if step_guess_orientation:\n QgsMessageLog.logMessage(\"3/ Building image sequences...\", \"QuickDroneMap\", 0)\n sorted_images = sorted(self.images, key=lambda x: x.timestamp)\n for i in range(len(sorted_images)):\n\n prev_image = sorted_images[i-1] if i>0 else None\n image = sorted_images[i]\n next_image = sorted_images[i+1] if i<len(sorted_images)-1 else None\n\n if prev_image is None or next_image is None:\n continue\n\n angle_p_i = math.atan2(image.point.x()-prev_image.point.x(),-image.point.y()+prev_image.point.y())\n angle_i_n = 
math.atan2(next_image.point.x()-image.point.x(),-next_image.point.y()+image.point.y())\n\n # Checking if the three images are aligned (if not, we're probably at an angle)\n dA = absolute_angle_difference(angle_p_i, angle_i_n)\n if dA > ANGLE_THRESHOLD:\n continue\n\n # Checking if the three images are near enough timewise, if not, it could be separate flights\n dT1 = image.timestamp - prev_image.timestamp\n dT2 = next_image.timestamp - image.timestamp\n if dT1 > TIME_THRESHOLD or dT2 > TIME_THRESHOLD:\n continue\n\n prev_image.next_image = image\n image.prev_image = prev_image\n image.next_image = next_image\n next_image.prev_image = image\n\n QgsMessageLog.logMessage(\"4/ Deriving orientation from image sequence\", \"QuickDroneMap\", 0)\n for image in self.images:\n # if the direction wasn't set in the Exif tags, we derive it from the image sequences\n if image.direction is None:\n img_a = image.prev_image or image \n img_b = image.next_image or image\n image.angle = math.atan2(img_b.point.x()-img_a.point.x(),-img_b.point.y()+img_a.point.y())\n\n if step_advanced_alignement:\n QgsMessageLog.logMessage(\"5/ Building image neighbourhood graph...\", \"QuickDroneMap\", 0)\n from scipy.spatial import Delaunay\n points = [(i.point.x(),i.point.y()) for i in self.images]\n triangulation = Delaunay(points)\n\n done = [[False for _i2 in self.images] for _i1 in self.images]\n for tri in triangulation.simplices:\n i1,i2,i3 = tri\n if not done[i1][i2]:\n e = Edge(self.images[i1], self.images[i2])\n self.edges.append(e)\n self.images[i1].edges.append(e)\n self.images[i2].edges.append(e)\n done[i1][i2] = True\n if not done[i1][i3]:\n e = Edge(self.images[i1], self.images[i3])\n self.edges.append(e)\n self.images[i1].edges.append(e)\n self.images[i3].edges.append(e)\n done[i1][i3] = True\n if not done[i2][i3]:\n e = Edge(self.images[i2], self.images[i3])\n self.edges.append(e)\n self.images[i2].edges.append(e)\n self.images[i3].edges.append(e)\n done[i2][i3] = True\n\n QgsMessageLog.logMessage(\"6/ Computing similarities\", \"QuickDroneMap\", 0)\n for i, edge in enumerate(self.edges):\n QgsMessageLog.logMessage(\"Done {} out of {}\".format(i,len(self.edges)), \"QuickDroneMap\", 0)\n QApplication.processEvents()\n edge.compute_transform()\n\n # initial_guess_np, _ = self.get_initial_values_and_bounds()\n # QgsMessageLog.logMessage(\"Initial fitness is {}\".format(self.calculate_fitness(initial_guess_np)), \"QuickDroneMap\", 0)\n\n # print(\"TESTING QUALITY OF SIMILARITY (disable optimization to do this)\")\n # done = []\n # edges_to_delete = []\n # for edge in self.edges:\n # QApplication.processEvents()\n\n # if edge.imageA in done or edge.imageB in done:\n # edges_to_delete.append(edge)\n # continue\n\n # done.append(edge.imageA)\n # done.append(edge.imageB)\n\n # d_angle = edge.angle\n # edge.imageB.angle = edge.imageA.angle + d_angle\n\n # f_scale = edge.scale\n # edge.imageB.scale = edge.imageA.scale * f_scale\n\n # d_point = QgsPointXY(edge.tvec[0],edge.tvec[1])\n # d_point = d_point.rotated(edge.imageA.angle)\n # d_point *= edge.imageA.pixel_size/DOWNSCALING_FACTOR\n # edge.imageB.point = edge.imageA.point + d_point\n # for edge in edges_to_delete:\n # self.edges.remove(edge)\n\n\n # print(\"AFTER PROTOTYPE PLACEMENT\")\n # initial_guess_np, _ = self.get_initial_values_and_bounds()\n # self.calculate_fitness(initial_guess_np)\n\n\n QgsMessageLog.logMessage(\"7/ Optimizing\", \"QuickDroneMap\", 0)\n QApplication.processEvents()\n\n initial_guess_np, bounds = self.get_initial_values_and_bounds() \n # 
res_1 = least_squares(calculate_fitness, initial_guess_np, bounds=([b[0] for b in bounds],[b[1] for b in bounds]))\n res_1 = minimize(self.calculate_fitness, initial_guess_np, bounds=bounds)\n\n for image in self.images:\n px = res_1.x[image.id*4+0]\n py = res_1.x[image.id*4+1]\n pa = res_1.x[image.id*4+2]\n ps = res_1.x[image.id*4+3]\n image.point = QgsPointXY(px, py)\n image.angle = pa\n image.psize = ps\n\n initial_guess_np, _ = self.get_initial_values_and_bounds()\n QgsMessageLog.logMessage(\"After optimization fitness is {}\".format(self.calculate_fitness(initial_guess_np)), \"QuickDroneMap\", 0)\n \n QgsMessageLog.logMessage(\"8/ Computing all transforms...\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.update_transform()\n\n if step_gen_worldfiles:\n QgsMessageLog.logMessage(\"9a/ Creating and loading worldfiles\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.write_worldfile()\n if step_load_worldfiles:\n image.load_worldfile(self.iface)\n\n if step_gen_vrts:\n QgsMessageLog.logMessage(\"9b/ Creating and loading vrts\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.write_vrt()\n if step_load_vrts:\n image.load_vrt(self.iface)\n\n if step_load_debug:\n QgsMessageLog.logMessage(\"10/ Creating debug jsons files\", \"QuickDroneMap\", 0)\n edg_data = {\"type\": \"FeatureCollection\",\"features\": [], \"crs\": {\"type\": \"EPSG\",\"properties\": {\"code\": 32628}}} # TODO : use self.crs\n for edge in self.edges:\n coords = [[edge.imageA.point.x(), edge.imageA.point.y()],[edge.imageB.point.x(), edge.imageB.point.y()]]\n props = {k:v for (k,v) in vars(edge).items()}\n props['angle_a'] = edge.imageA.angle\n props['angle_b'] = edge.imageB.angle\n feature = {\"type\": \"Feature\",\"properties\": props,\"geometry\": {\"type\": \"LineString\",\"coordinates\": coords}}\n edg_data['features'].append(feature)\n \n edg_file = tempfile.NamedTemporaryFile(mode='w+', suffix='.geojson', delete=False)\n json.dump(edg_data, edg_file, default=lambda o: str(o))\n edg_file.close()\n layer = self.iface.addVectorLayer(edg_file.name,\"[DEBUG] Edges\",\"ogr\")\n layer.loadNamedStyle(os.path.join(os.path.dirname(os.path.realpath(__file__)),'debug_edges_style.qml'))\n \n graph_data = {\"type\": \"FeatureCollection\",\"features\": [], \"crs\": {\"type\": \"EPSG\",\"properties\": {\"code\": 4326}}} # TODO : use self.crs\n for edge in self.edges:\n coords = [[edge.imageA.lon, edge.imageA.lat],[edge.imageB.lon, edge.imageB.lat]]\n props = {k:v for (k,v) in vars(edge).items()}\n feature = {\"type\": \"Feature\",\"properties\": props,\"geometry\": {\"type\": \"LineString\",\"coordinates\": coords}}\n graph_data['features'].append(feature)\n\n graph_file = tempfile.NamedTemporaryFile(mode='w+', suffix='.geojson', delete=False)\n json.dump(graph_data, graph_file, default=lambda o: str(o))\n graph_file.close()\n layer = self.iface.addVectorLayer(graph_file.name,\"[DEBUG] Graph\",\"ogr\")\n layer.loadNamedStyle(os.path.join(os.path.dirname(os.path.realpath(__file__)),'debug_graph_style.qml'))", "def preview_trajectory(self, state, remain_timestep, vis=False):\n print('in preview trajectory')\n state_origin = copy.deepcopy(state)\n sim_state = [state[0][0].copy(), state[0][1]] \n\n joints = get_joints(self.joint_listener)\n ef_pose = get_ef_pose(self.pose_listener)\n ef_pose_origin = ef_pose.copy()\n joint_plan = [joints]\n ef_pose_plan = [ef_pose]\n\n for episode_steps in range(remain_timestep):\n state[0] = sim_state\n gaddpg_input_state = select_target_point(state)\n step = 
min(max(remain_timestep - episode_steps, 1), 25)\n action, _, _, aux_pred = agent.select_action(gaddpg_input_state, remain_timestep=step)\n action_pose = unpack_action(action)\n ef_pose = ef_pose.dot(action_pose)\n joints = solve_ik(joints, pack_pose(ef_pose))\n joint_plan.append(joints)\n ef_pose_plan.append(ef_pose)\n sim_next_point_state = se3_transform_pc(se3_inverse(action_pose), sim_state[0]) \n sim_state[0] = sim_next_point_state\n\n if vis:\n # vis entire traj. Might be useful\n poses_ = robot.forward_kinematics_parallel(\n wrap_value(joint_plan[0])[None], offset=True)[0]\n poses = [pack_pose(pose) for pose in poses_]\n line_starts, line_ends = grasp_gripper_lines(np.array(ef_pose_plan))\n points = state_origin[0][0]\n points = se3_transform_pc(ef_pose_origin, points)\n point_color = get_point_color(points)\n rgb = self.planner.planner_scene.renderer.vis(poses, list(range(10)), \n shifted_pose=np.eye(4),\n interact=2,\n V=np.array(V),\n visualize_context={\n \"white_bg\": True,\n \"project_point\": [points],\n \"project_color\": [point_color],\n \"static_buffer\": True,\n \"reset_line_point\": True,\n \"thickness\": [2],\n \"line\": [(line_starts[0], line_ends[0])],\n \"line_color\": [[255, 0, 0]], \n }\n )\n\n num = len(joint_plan)\n traj = np.zeros((num, 9), dtype=np.float32)\n for i in range(num):\n traj[i, :] = joint_plan[i]\n return traj", "def capture_last(self, frames, mode = WHOLE):\n session = {}\n session[SESSION_LEN] = frames\n session[SESSION_OBJ_2D] = {}\n\n for object_index in range(len(self.e.objects)):\n session[SESSION_OBJ_2D][object_index] = []\n\n # must < self.speed\n left_over_distance = 0\n\n captures = {}\n for i in range(self.n_objects):\n captures[i] = []\n\n frame_counter = 1\n\n # For the first frame, all objects are at the current positions\n for i in range(self.n_objects):\n captures[i].append(self.e.objects[i])\n\n if mode == SPEED:\n for object_index, prev_transform, next_transform, _, _, success, _, _ in self.action_storage[::-1]:\n if not success:\n continue\n obj = self.e.objects[object_index]\n\n path_distance = np.linalg.norm(prev_transform.position - next_transform.position)\n\n pos = self.speed - left_over_distance\n while pos < path_distance and frame_counter < frames:\n new_obj = obj.clone()\n\n new_obj.transform = (pos / path_distance) * prev_transform + (1 - pos/path_distance) * next_transform\n\n captures[object_index].append(new_obj)\n\n # For static objects, just add the last frames\n for i in range(self.n_objects):\n if i != object_index:\n captures[i].append(captures[i][-1])\n\n pos += self.speed\n frame_counter += 1\n\n # We have enough frames, don't need to trace back anymore\n if frame_counter >= frames:\n break\n else:\n # pos >= path_distance\n # recalculate left_over_distance for next action\n left_over_distance = pos - path_distance\n elif mode == WHOLE:\n # First pass:\n # Calculate the total travelling distance\n total_path_distance = 0.0\n for object_index, prev_transform, next_transform, _, _, success, _, _ in self.action_storage[::-1]:\n if not success:\n continue\n obj = self.e.objects[object_index]\n\n path_distance = np.linalg.norm(prev_transform.position - next_transform.position)\n\n total_path_distance += path_distance\n\n # Actual speed\n frame_distance = total_path_distance / (frames - 1)\n\n for object_index, prev_transform, next_transform, _, _, success, _, _ in self.action_storage[::-1]:\n # After each loop, left_over_distance < frame_distance\n if not success:\n continue\n obj = self.e.objects[object_index]\n\n 
path_distance = np.linalg.norm(prev_transform.position - next_transform.position)\n\n if path_distance == 0:\n # nothing to do here\n pass\n else:\n pos = frame_distance - left_over_distance\n\n #print ('path_distance = %.4f; frame_distance = %.4f; left_over_distance = %.4f ' % (path_distance, frame_distance, left_over_distance))\n while pos < path_distance:\n #print ('pos = %.4f' % pos)\n new_obj = obj.clone()\n\n new_obj.transform = (pos / path_distance) * prev_transform + (1 - pos/path_distance) * next_transform\n\n captures[object_index].append(new_obj)\n\n # For static objects, just add the last frames\n for i in range(self.n_objects):\n if i != object_index:\n captures[i].append(captures[i][-1])\n\n pos += frame_distance\n frame_counter += 1\n\n # 0 <= left_over_distance < frame_distance\n left_over_distance = pos - path_distance\n\n # back to the beginning, just interpolate the last frame\n if frame_counter < frames:\n while frame_counter < frames:\n for i in range(self.n_objects):\n captures[i].append(captures[i][-1])\n\n frame_counter += 1\n\n for object_index in range(self.n_objects):\n session[SESSION_OBJ_2D][object_index] = captures[object_index][::-1]\n\n return session" ]
[ "0.63010347", "0.62757874", "0.62689936", "0.59285986", "0.5928235", "0.5921437", "0.5783612", "0.56781733", "0.56098306", "0.55962694", "0.5596124", "0.55675745", "0.5542845", "0.5538803", "0.55189425", "0.54891604", "0.5424595", "0.54225814", "0.541845", "0.5406309", "0.5383157", "0.53826237", "0.53797483", "0.534848", "0.5346385", "0.5338793", "0.5333521", "0.53193414", "0.5258969", "0.52540153" ]
0.7915945
0
returns a matrix with source-sink pairs on the y-axis and version on the x-axis
def get_matrix(self, app):
    # get all source-sink combinations as y-labels
    ylabels = []
    for apk in app['apks']:
        for data in apk['data'].keys():
            if data not in ylabels:
                ylabels.append(data)
    ylabels.sort()
    if len(ylabels) == 0:
        ylabels.append("No URL found")
    matrix = numpy.zeros(shape=(len(app['apks']), len(ylabels)))
    xlabels = []
    for i in range(0, len(app['apks'])):
        apk = app['apks'][i]
        label = apk['vercode'] + "~" + apk['date']
        xlabels.append(label)
        values = []
        for el in ylabels:
            if el in apk['data'].keys():
                val = apk['data'][el]  # don't limit values here
                values.append(val)
            else:
                if 'error' in apk.keys():
                    values.append(-1)
                else:
                    values.append(0)
        matrix[i] = values
    return matrix.T, ylabels, xlabels
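A minimal usage sketch for the function above; the nested 'app' layout, the URL-style data keys, and the matplotlib heatmap call are assumptions inferred from the code, not part of the original source.

# Sketch only: assumes get_matrix above is reachable as a module-level function
# (its unused 'self' lets us pass None) and assumes this 'app' layout, which is
# inferred from the key accesses ('apks', 'data', 'vercode', 'date', 'error').
import numpy
import matplotlib.pyplot as plt

app = {
    'apks': [
        {'vercode': '100', 'date': '2020-01-01',
         'data': {'http://tracker.example/collect': 3}},
        {'vercode': '101', 'date': '2020-02-01',
         'data': {}, 'error': 'analysis timed out'},
    ]
}

matrix, ylabels, xlabels = get_matrix(None, app)  # shape (len(ylabels), len(xlabels))
plt.imshow(matrix, aspect='auto')                 # rows: source-sink pairs, columns: versions
plt.yticks(range(len(ylabels)), ylabels)
plt.xticks(range(len(xlabels)), xlabels, rotation=90)
plt.show()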
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generateNeighborMap(self):\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(np.array([i.replace(\"#\",\" \")\n .split()[0:4] for i in value.index])\n .astype(float))\n\n B=np.array(A[0]).reshape(len(A[0]),4)\n print (B[:,0]+B[:,1])/2\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(value.sum(axis=1).values)\n print A", "def seg_row_col(sp) : \n return src_from_rc8x8(sp.peak_y_raw, sp.peak_x_raw)", "def init_output_mat(self, y_list):", "def readSrc_bySens(self):\n dctn = self.srcData\n dctn['header'] = []\n # dctn['header'] = ['%% This dictionary created by alog_manip.alogrd_dict']\n for msg in self.srcFile: # broken by lines, are now strings\n msg = msg[0:-1] # remove \\n at the end of the string\n if '%%' in msg:\n dctn['header'].append(msg) # assume all comments occur at beginning of file\n else:\n msg = msg.split()\n if msg[2] not in dctn: # none from this gSource yet\n dctn[msg[2]] = {}\n if msg[1] not in dctn[msg[2]]: # none in this gSource from this zMeas yet\n dctn[msg[2]][msg[1]] = {}\n try:\n dctn[msg[2]][msg[1]][float(msg[0])] = float(msg[3]) # double\n except ValueError: # it's a string\n # dimc = msg[3].split(']')[0].split('x')[1] # cols\n # dimr = msg[3].split(']')[0].split('x')[0][1:] # rows\n value_s = msg[3].split(']')[1][1:-1].split(',')\n dctn[msg[2]][msg[1]][float(msg[0])] = [float(i) for i in value_s]\n except IndexError: # it's blank\n dctn[msg[2]][msg[1]][float(msg[0])] = None # nan better?", "def image_sources(dim, xs, order, rc):\n sources = np.zeros((number_of_sources(order)+1, 6))\n \"\"\"gain factor of sound source = 1\n number of the last hitted wall = 0\n propagation path = 0, because 0 wall hitted\"\"\"\n sources[0, :] = [xs[0], xs[1], xs[2], 1, 0, 0]\n\n c = 0 # counter to iterate\n r = 1 # variable to write data in the corresponding row\n while c <= number_of_sources(order - 1):\n sq = mirror_source(dim, [sources[c, 0], sources[c, 1],\n sources[c, 2]], sources[c, 3], sources[c, 4], rc,\n sources[c, 5])\n sources[r:r+sq.shape[0], :] = sq\n c += 1\n r += sq.shape[0]\n return(sources)", "def make_maps_of_2x1_pix_coordinates (sp) : \n x_rhs = np.arange(sp.colsh)*sp.pixs + sp.pixw - sp.pixsh\n x_rhs[0] = sp.pixwh # set x-coordinate of the wide pixel \n x_arr = np.hstack([-x_rhs[::-1],x_rhs])\n\n y_arr = np.arange(sp.rows) * sp.pixs\n y_arr -= y_arr[-1]/2 # move origin to the center of array\n\n sp.x_map2x1, sp.y_map2x1 = np.meshgrid(x_arr, y_arr)", "def _get_matrix(self, source_points, destination_points):\n return [\n [self.measure_between_two_points(point_a, point_b) for point_b in destination_points]\n for point_a in source_points\n ]", "def get_sources_and_targets(index_pairings):\n\n source_target_dictionary = {}\n links_list = []\n \n itr = 0\n \n for pair in index_pairings:\n source = pair[0]\n target = pair[1]\n\n source_target_dictionary = {\"source\":source, \"target\":target}\n links_list.append(source_target_dictionary)\n\n return links_list", "def ActiveHlt2Lines(self) :\n return []", "def ActiveHlt2Lines(self) :\n\n lines = [\n 'Hlt2SingleMuon',\n 'Hlt2SingleMuonHighPT',\n 'Hlt2SingleMuonLowPT',\n ]\n \n return lines", "def getSrc(self):\n xml = open(self.model, 'r')\n keywd1 = ['RA', 'DEC', 'PointSource']\n ra = []\n dec = []\n nam = []\n sep = []\n target = SkyCoord(ra=self.ra*u.degree, dec=self.dec*u.degree, frame='icrs') \n for line in xml :\n if keywd1[0] in line:\n ra.append( float(line.split('\"')[-2]) )\n if keywd1[1] in line:\n dec.append( float(line.split('\"')[-2]) )\n s = SkyCoord(ra=ra[-1]*u.degree, 
dec=dec[-1]*u.degree, frame='icrs')\n sep.append(target.separation(s).deg)\n if keywd1[2] in line:\n nam.append( line.split('\"')[3].split()[-1] ) # no '3FGL'\n xml.close()\n\n if self.csys == 'GAL':\n srcPos = SkyCoord(np.array(ra)*u.degree, np.array(dec)*u.degree, frame='icrs')\n ra, dec = srcPos.galactic.l.deg, srcPos.galactic.b.deg\n\n srcs = Table([ra, dec, nam, sep], names=('RA', 'DEC', 'Name', 'Separation'))\n return srcs", "def get_monitoring_data_specs(self):\n space = [self.get_input_space()]\n space += self.get_output_space()\n space = CompositeSpace(space)\n source = (self.get_input_source(), self.get_target_source(), 'second_targets')\n return (space, source)", "def buildxy(self):\n\n x_dim = float(self.metadata['XPTS'])\n xmin = float(self.metadata['XMIN'])\n xrange = float(self.metadata['XWID'])\n\n d_x = xrange/(x_dim-1)\n x_axis = (np.arange(xmin, xmin+x_dim*d_x, d_x))\n\n # y_dim = float(\"\".join(ProcessSpectra.get_from_dict('YPTS')))\n # ymin = list(map(float, get_from_dict('YMIN')))\n # yrange = float(\"\".join(ProcessSpectra.get_from_dict('YWID')))\n\n frwidth = 1000/(x_axis[0])\n frinc = frwidth/(len(self.zdata))\n freq = np.arange(-frwidth, frwidth, frinc*2)\n xdata = freq\n ydata = freq\n\n return xdata, ydata", "def ActiveHlt2Lines(self) :\n lines = [\n 'Hlt2B2HH',\n 'Hlt2B2PiPi',\n 'Hlt2B2KPi',\n 'Hlt2B2KK',\n 'Hlt2Lb2PK',\n 'Hlt2Lb2PPi'\n ]\n\n return lines", "def yedges(self):\n return self.edges[1]", "def intialize_source():\n raw = loadmat(\"p300backrec2.mat\")\n channels_raw = raw['channels']\n channels = []\n for i in channels_raw[0]:\n channels.extend(list(i))\n X = raw['data']\n marker = raw['marker']\n return X,channels,marker", "def add_edge_vectors(self):\n edge_vectors = np.eye(self.values.shape[1])\n self.values = np.vstack([self.values, edge_vectors])\n self.values_planar = np.vstack([self.values_planar, edge_vectors])\n self.number_of_vectors = self.values.shape[0]\n self.normalize()\n #df = pd.DataFrame(data=self.values)\n #fig = px.scatter_3d(df,x=0, y=1, z=2)\n #plotly.offline.plot(fig,filename=\"reference_vectors_plot.html\")", "def plot_attribute_surface(self, z_source, x_min = -3., x_max = 3., y_min = -5., y_max = 5., dim1=0, dim2=1, grid_res=0.5):\n # create the dataspace\n x1 = torch.arange(x_min, x_max, grid_res)\n x2 = torch.arange(y_min, y_max, grid_res)\n z1, z2 = torch.meshgrid([x1, x2])\n\n num_points = z1.size(0) * z1.size(1)\n\n # z = torch.randn(1, self.model.latent_space_dim)\n z = z_source\n \n z = z.repeat(num_points, 1)\n\n z[:, dim1] = z1.contiguous().view(1, -1)\n z[:, dim2] = z2.contiguous().view(1, -1)\n z = to_cuda_variable(z)\n # pass the points through the model decoder\n mini_batch_size = 1\n num_mini_batches = num_points // mini_batch_size\n\n nd_all = []\n nr_all = []\n rc_all = []\n aij_all = []\n # ie_all = []\n\n for i in tqdm(range(num_mini_batches)):\n\n # if i > 0:\n # break\n z_batch = z[i*mini_batch_size:(i+1)*mini_batch_size, :]\n dummy_score_tensor = to_cuda_variable(torch.zeros(z_batch.size(0), self.measure_seq_len))\n _, samples = self.model.decoder(\n z=z_batch,\n score_tensor=dummy_score_tensor,\n train=self.train\n )\n samples = samples.view(z_batch.size(0), -1)\n note_density = self.dataset.get_notes_density_in_measure(samples)\n note_range = self.dataset.get_note_range_of_measure(samples)\n rhy_complexity = self.dataset.get_rhy_complexity(samples)\n avg_interval_jump = self.dataset.get_average_pitch_interval_of_measure(samples)\n\n # interval_entropy = self.dataset.get_interval_entropy(samples)\n 
nd_all.append(note_density)\n nr_all.append(note_range)\n rc_all.append(rhy_complexity)\n aij_all.append(avg_interval_jump)\n # ie_all.append(interval_entropy)\n\n nd_all = to_numpy(torch.cat(nd_all, 0))\n nr_all = to_numpy(torch.cat(nr_all, 0))\n rc_all = to_numpy(torch.cat(rc_all, 0))\n aij_all = to_numpy(torch.cat(aij_all, 0))\n\n print(nd_all.shape)\n print(nr_all.shape)\n print(rc_all.shape)\n print(aij_all.shape)\n\n # ie_all = to_numpy(torch.cat(ie_all, 0))\n z = to_numpy(z)\n if self.trainer_config == '':\n reg_str = '[no_reg]'\n else:\n reg_str = self.trainer_config\n\n # filename = self.dir_path + '/plots_avgint_upscaled/' + reg_str + 'attr_surf_rhy_complexity_[' \\\n # + str(dim1) + ',' + str(dim2) + '].png'\n # self.plot_dim(z, rc_all, filename, dim1=dim1, dim2=dim2)\n\n # filename = self.dir_path + '/plots_avgint_upscaled/' + reg_str + 'attr_surf_note_range_[' \\\n # + str(dim1) + ',' + str(dim2) + '].png'\n # self.plot_dim(z, nr_all, filename, dim1=dim1, dim2=dim2)\n\n\n\n filename = self.dir_path + '/plots_avgint_upscaled/' + reg_str + 'attr_surf_note_density_[' \\\n + str(dim1) + ',' + str(dim2) + ']_3.png'\n self.plot_dim(z, nd_all, filename, dim1=dim1, dim2=dim2)\n\n filename = self.dir_path + '/plots_avgint_upscaled/' + reg_str + 'attr_surf_avg_interval_jump_[' \\\n + str(dim1) + ',' + str(dim2) + ']_3.png'\n self.plot_dim(z, aij_all, filename, dim1=dim1, dim2=dim2)", "def _get_Y_array_linked(self):\n\n # @TODO:\n # make linked_array a new method of MRTwo used in multiple methods\n #\n linked_array = vstack((self.shift_array[:self.not_linked_elem],\n self.shift_array[self.not_linked_elem + 1:]))\n\n # global coordinates from local coordinates of one quarter\n #\n y0 = self.length_xy_quarter\n y_array_linked = hstack((y0 - linked_array[:, 1],\n y0 + linked_array[:, 1],))\n\n # add the midcenter of the shell to the linking array\n #\n if self.link_edge_center == True:\n y_array_linked = hstack((y_array_linked, y0))\n\n return sort(y_array_linked)", "def _get_streams(self):\n in1 = self.ins[0]\n out1 = self.outs[0]\n hu = self._heat_utilities[0]\n in2 = hu._fresh\n out2 = hu._used\n return in1, in2, out1, out2", "def prepare2plot(self):\n # ======================== Check coordinates ========================\n # Check xyz :\n self.xyz = np.array(self.xyz).astype(np.float32)\n if self.xyz.ndim is not 2:\n self.xyz = self.xyz[:, np.newaxis]\n if 3 not in self.xyz.shape:\n raise ValueError(\"xyz must be an array of size (N, 3)\")\n elif self.xyz.shape[1] is not 3:\n self.xyz = self.xyz.T\n self.xyz = self.xyz\n self.nSources = self.xyz.shape[0]\n # Check coordinate system :\n if self.system not in ['mni', 'tal']:\n raise ValueError(\"The s_system must either be 'mni' or 'tal'.\")\n elif self.system is 'tal':\n self.xyz = tal2mni(self.xyz)\n\n # ======================== Check color ========================\n # Simple string :\n if isinstance(self.color, str):\n self.sColor = color2vb(color=self.color, default=self.color,\n length=self.nSources, alpha=self.alpha)\n # list of colors :\n elif isinstance(self.color, list):\n if len(self.color) != self.nSources:\n raise ValueError(\"The length of the color sources list must \"\n \"be the same the number of electrode.\")\n else:\n self.sColor = np.squeeze(np.array([color2vb(\n color=k, length=1, alpha=self.alpha) for k in self.color]))\n if (self.sColor.shape[1] is not 4):\n self.sColor = self.sColor.T\n # Array of colors :\n elif isinstance(self.color, np.ndarray):\n if self.color.shape == (1, 3) or self.color.shape == (1, 4):\n 
self.sColor = np.tile(self.color, (self.nSources, 1))\n elif self.nSources in self.color.shape:\n if (self.color.shape[1] is not 4):\n self.color = self.color.T\n self.sColor = self.color\n else:\n raise ValueError(\"color for sources must be a (N, 3) array \"\n \"(for rgb) or (N, 4) for rgba.\")\n\n # ======================== Check mask ========================\n # Check mask :\n if self.smask is not None:\n if (len(self.smask) != self.nSources) or not isinstance(\n self.smask, np.ndarray):\n raise ValueError(\"The mask must be an array of bool with the \"\n \"same length as the number of electrodes\")\n else:\n # Get the RGBA of mask color :\n self.sColor[self.smask, ...] = self.smaskcolor\n else:\n self.smask = np.zeros((self.nSources,), dtype=bool)\n\n # ======================== Check radius ========================\n # Check radius :\n if not isinstance(self.radiusmin, (int, float)):\n raise ValueError(\"s_radiusmin must be an integer or a float \"\n \"number.\")\n if not isinstance(self.radiusmax, (int, float)):\n raise ValueError(\"s_radiusmax must be an integer or a float \"\n \"number.\")\n if self.radiusmin >= self.radiusmax:\n raise ValueError(\"s_radiusmin must be > to s_radiusmax\")\n\n # --------------------------------------------------------------------\n # Check data :\n if self.data is None:\n self.data = np.ones((self.nSources,), dtype=np.float32)\n if not np.ma.isMaskedArray(self.data):\n self.data = np.ma.masked_array(np.ravel(self.data),\n mask=self.smask.copy())\n if len(self.data) != self.nSources:\n raise ValueError(\"The length of data must be the same as the \"\n \"number of electrodes\")\n else:\n self.array2radius()\n\n # --------------------------------------------------------------------\n # Check text :\n if self.stext is not None:\n if len(self.stext) != len(self.data):\n raise ValueError(\"The length of text data must be the same \"\n \"as the number of electrodes\")", "def center(sourcelocs, facutmznum, fachemi):\n \n # Fill up lists of x and y coordinates of all source vertices \n vertx_l = []\n verty_l = []\n for index, row in sourcelocs.iterrows():\n \n vertx_l.append(row[\"utme\"])\n verty_l.append(row[\"utmn\"])\n \n # If this is an area source, add the other 3 corners to vertex list\n if row[\"source_type\"].upper() == \"A\":\n angle_rad = m.radians(row[\"angle\"])\n utme1 = row[\"utme\"] + row[\"lengthx\"] * m.cos(angle_rad)\n utmn1 = row[\"utmn\"] - row[\"lengthx\"] * m.sin(angle_rad)\n utme2 = (row[\"utme\"] + (row[\"lengthx\"] * m.cos(angle_rad)) +\n (row[\"lengthy\"] * m.sin(angle_rad)))\n utmn2 = (row[\"utmn\"] + (row[\"lengthy\"] * m.cos(angle_rad)) -\n (row[\"lengthx\"] * m.sin(angle_rad)))\n utme3 = row[\"utme\"] + row[\"lengthy\"] * m.sin(angle_rad)\n utmn3 = row[\"utmn\"] + row[\"lengthy\"] * m.cos(angle_rad)\n vertx_l.append(utme1)\n vertx_l.append(utme2)\n vertx_l.append(utme3)\n verty_l.append(utmn1)\n verty_l.append(utmn2)\n verty_l.append(utmn3)\n \n # If this is a volume source, then add the vertices of it\n if row[\"source_type\"].upper() == \"V\":\n utme1 = row[\"utme\"] + row[\"lengthx\"] * m.sqrt(2)/2\n utmn1 = row[\"utmn\"] - row[\"lengthy\"] * m.sqrt(2)/2\n utme2 = row[\"utme\"] + row[\"lengthx\"] * m.sqrt(2)/2\n utmn2 = row[\"utmn\"] + row[\"lengthy\"] * m.sqrt(2)/2\n utme3 = row[\"utme\"] - row[\"lengthx\"] * m.sqrt(2)/2\n utmn3 = row[\"utmn\"] + row[\"lengthy\"] * m.sqrt(2)/2\n vertx_l.append(utme1)\n vertx_l.append(utme2)\n vertx_l.append(utme3)\n verty_l.append(utmn1)\n verty_l.append(utmn2)\n verty_l.append(utmn3)\n \n # 
If line or buoyant line source, add second vertex\n if row[\"source_type\"].upper() == \"N\" or row[\"source_type\"].upper() == \"B\":\n vertx_l.append(row[\"utme_x2\"])\n verty_l.append(row[\"utmn_y2\"]) \n \n vertx_a = np.array(vertx_l)\n verty_a = np.array(verty_l)\n\n \n # Combine the x and y vertices lists into list of tuples and then get a\n # unique list of vertices of the form (x, y) where x=utme and y=utmn\n sourceverts = list(zip(vertx_l, verty_l))\n unique_verts = list(set(sourceverts))\n \n \n # Find the two vertices that are the farthest apart\n # Also find the corners of the modeling domain\n \n max_dist = 0\n max_x = min_x = vertx_a[0]\n max_y = min_y = verty_a[0]\n \n if len(unique_verts) > 1: #more than one source coordinate\n \n # initialize\n xmax1 = unique_verts[0][0]\n ymax1 = unique_verts[0][1]\n xmax2 = unique_verts[1][0]\n ymax2 = unique_verts[1][1]\n \n for i in range(0, len(unique_verts)-1):\n \n # corners\n max_x = max(max_x, unique_verts[i][0])\n max_y = max(max_y, unique_verts[i][1])\n min_x = min(min_x, unique_verts[i][0])\n min_y = min(min_y, unique_verts[i][1])\n \n # find farthest apart\n j = i + 1\n for k in range(j, len(unique_verts)):\n dist = m.sqrt((unique_verts[i][0] - unique_verts[k][0])**2 + \n (unique_verts[i][1] - unique_verts[k][1])**2)\n if dist > max_dist:\n max_dist = dist\n xmax1 = unique_verts[i][0]\n ymax1 = unique_verts[i][1]\n xmax2 = unique_verts[k][0]\n ymax2 = unique_verts[k][1]\n \n # Calculate the center of the facility in utm coordinates\n cenx = round((xmax1 + xmax2) / 2)\n ceny = round((ymax1 + ymax2) / 2)\n \n else: #single source coordinate\n \n # Calculate the center of the facility in utm coordinates\n cenx = round(max_x)\n ceny = round(max_y)\n\n\n # Compute the lat/lon of the center\n utmz = str(facutmznum) + fachemi\n cenlat, cenlon = UTM.utm2ll(ceny, cenx, utmz)\n \n return cenx, ceny, cenlon, cenlat, max_dist, vertx_a, verty_a", "def visualize_in_2d(self):\n ae = Autoencoder_test(self.data)\n self.code = ae.encode(n_dimension=2, learning_rate=0.01, training_epochs=10, batch_size=400)\n for i in range(len(self.cluster)):\n list_x = []\n list_y = []\n for j in self.cluster[i]:\n list_x.append(self.code[0][j,0])\n list_y.append(self.code[0][j,1])\n plt.scatter(list_x,list_y)\n plt.show()\n return", "def _links_as_columns(self):\n return Link(*map(np.array, zip(*self.links)))", "def __transposePlotConfigData(series: BoxSeries) -> List[List]:\n\n data: List[List] = list(series.data)\n\n return list(map(list, zip(*data)))", "def matrix_callback(value):\n self.hitlet_points = self.hitlets_to_hv_points(self.hitlets_per_event[value],\n t_ref=self.event_df.loc[value, 'time']\n )\n\n # Create the hitlet matrix and time stream:\n self.hitlet_matrix = self.plot_hitlet_matrix(hitlets=None,\n _hitlet_points=self.hitlet_points)\n return self.hitlet_matrix", "def lic_flow(vectors,len_pix=10):\n vectors = np.asarray(vectors)\n m,n,two = vectors.shape\n if two!=2:\n raise ValueError\n\n result = np.zeros((2*len_pix+1,m,n,2),dtype=np.int32) # FIXME: int16?\n center = len_pix\n result[center,:,:,0] = np.arange(m)[:,np.newaxis]\n result[center,:,:,1] = np.arange(n)[np.newaxis,:]\n\n for i in range(m):\n for j in range(n):\n y = i\n x = j\n fx = 0.5\n fy = 0.5\n for k in range(len_pix):\n vx, vy = vectors[y,x]\n print x, y, vx, vy\n if vx>=0:\n tx = (1-fx)/vx\n else:\n tx = -fx/vx\n if vy>=0:\n ty = (1-fy)/vy\n else:\n ty = -fy/vy\n if tx<ty:\n print \"x step\"\n if vx>0:\n x+=1\n fy+=vy*tx\n fx=0.\n else:\n x-=1\n fy+=vy*tx\n fx=1.\n else:\n 
print \"y step\"\n if vy>0:\n y+=1\n fx+=vx*ty\n fy=0.\n else:\n y-=1\n fx+=vx*ty\n fy=1.\n if x<0: x=0\n if y<0: y=0\n if x>=n: x=n-1\n if y>=m: y=m-1\n result[center+k+1,i,j,:] = y, x\n\n\n\n return result", "def generate_lines(self):\n x = self.square[0]\n y = self.square[1]\n lines = [[]]\n \n lines.append( ( (x, y+a) for a in range(1,8) ) )\n lines.append( ( (x+a, y) for a in range(1,8) ) )\n lines.append( ( (x, y-a) for a in range(1,8) ) )\n lines.append( ( (x-a, y) for a in range(1,8) ) )\n \n return lines", "def bitblt(self, src, row):\n srcpixel = 0\n dstpixel = row * self.width\n row_offset = self.width - src.width\n\n for _ in range(src.height):\n for _ in range(src.width):\n self.pixels[dstpixel] = src.pixels[srcpixel]\n srcpixel += 1\n dstpixel += 1\n dstpixel += row_offset\n\n # calc horizontal line mapping\n for y in range(self.height):\n self.lh_data.append([])\n x = 0\n while x < self.width:\n if self.pixels[y * self.width + x]:\n line_start = x\n line_end = x\n inline_x = x\n while inline_x <= self.width:\n if inline_x < self.width and self.pixels[y * self.width + inline_x]:\n inline_x += 1\n else:\n line_end = inline_x\n break\n self.lh_data[y].append((line_start, line_end - line_start))\n x = line_end + 1\n else:\n x += 1\n\n # calc vertical line mapping\n for x in range(self.width):\n self.lv_data.append([])\n y = 0\n while y < self.height:\n if self.pixels[y * self.width + x]:\n line_start = y\n line_end = y\n inline_y = y\n while inline_y <= self.height:\n if inline_y < self.height and self.pixels[inline_y * self.width + x]:\n inline_y += 1\n else:\n line_end = inline_y\n break\n self.lv_data[x].append((line_start, line_end - line_start))\n y = line_end + 1\n else:\n y += 1", "def __fillCoordinatesFromSource(self):\n self.xValues = []\n if self.yCoordinates:\n self.yValues = []\n if self.zCoordinates:\n self.zValues = []\n if self.clusterLabels:\n self.clusterValues = []\n if self.mixtureLabels:\n self.mixtureValues = []\n\n # initial setup for x,y,z Values, clusterValues, mixtureValues, and colorMapValues\n for pltIndex in range(len(self.outStreamTypes)):\n self.xValues.append(defaultdict(list))\n if self.yCoordinates:\n self.yValues.append(defaultdict(list))\n if self.zCoordinates:\n self.zValues.append(defaultdict(list))\n if self.clusterLabels:\n self.clusterValues.append(defaultdict(list))\n if self.mixtureLabels:\n self.mixtureValues.append(defaultdict(list))\n if self.colorMapCoordinates[pltIndex] is not None:\n self.colorMapValues[pltIndex] = defaultdict(list)\n\n # fill x,y,z Values, clusterValues, mixtureValues, and colorMapValues\n for pltIndex in range(len(self.outStreamTypes)):\n if len(self.sourceData[pltIndex]) == 0:\n return False\n dataSet = self.sourceData[pltIndex].asDataset()\n # anything but HistorySet\n if self.sourceData[pltIndex].type.strip() != 'HistorySet':\n for i in range(len(self.xCoordinates[pltIndex])):\n xSplit = self._returnSplitIndex('x', pltIndex, i)\n self.xValues[pltIndex][1].append(np.asarray(dataSet[xSplit].values.astype(float, copy=False)))\n if self.yCoordinates:\n for i in range(len(self.yCoordinates[pltIndex])):\n ySplit = self._returnSplitIndex('y', pltIndex, i)\n self.yValues[pltIndex][1].append(np.asarray(dataSet[ySplit.strip()].values.astype(float, copy=False)))\n if self.zCoordinates and self.dim > 2:\n for i in range(len(self.zCoordinates[pltIndex])):\n zSplit = self._returnSplitIndex('z', pltIndex, i)\n self.zValues[pltIndex][1].append(np.asarray(dataSet[zSplit.strip()].values.astype(float, copy=False)))\n if 
self.clusterLabels:\n for i in range(len(self.clusterLabels[pltIndex])):\n clusterSplit = self._returnSplitIndex('clusterLabels', pltIndex, i)\n self.clusterValues[pltIndex][1].append(np.asarray(dataSet[clusterSplit.strip()].values.astype(float, copy=False)))\n if self.mixtureLabels:\n for i in range(len(self.mixtureLabels[pltIndex])):\n mixtureSplit = self._returnSplitIndex('mixtureLabels', pltIndex, i)\n self.mixtureValues[pltIndex][1].append(np.asarray(dataSet[mixtureSplit.strip()].values.astype(float, copy=False)))\n if self.colorMapCoordinates[pltIndex] is not None:\n for i in range(len(self.colorMapCoordinates[pltIndex])):\n cSplit = self._returnSplitIndex('colorMap', pltIndex, i)\n self.colorMapValues[pltIndex][1].append(np.asarray(dataSet[cSplit.strip()].values.astype(float, copy=False)))\n # check if the array sizes are consistent\n sizeToMatch = self.xValues[pltIndex][1][-1].size\n if self.yCoordinates and self.yValues[pltIndex][1][-1].size != sizeToMatch:\n self.raiseAnError(Exception, f\"<y> variable has a size ({self.yValues[pltIndex][1][-1].size}) that is not consistent with input <x> ({sizeToMatch})\")\n if self.zCoordinates and self.dim > 2 and self.zValues[pltIndex][1][-1].size != sizeToMatch:\n self.raiseAnError(Exception, f\"<z> variable has a size ({self.zValues[pltIndex][1][-1].size}) that is not consistent with input <x> ({sizeToMatch})\")\n if self.colorMapCoordinates[pltIndex] is not None and self.colorMapValues[pltIndex][1][-1].size != sizeToMatch:\n self.raiseAnError(Exception, f\"<colorMap> variable has a size ({self.colorMapValues[pltIndex][1][-1].size}) that is not consistent with input <x> ({sizeToMatch})\")\n else:\n # HistorySet\n pivotParam = self.sourceData[pltIndex].indexes[0]\n for cnt in range(len(self.sourceData[pltIndex])):\n maxSize = 0\n for i in range(len(self.xCoordinates[pltIndex])):\n xSplit = self._returnSplitIndexHS('x', pltIndex, i)\n # for variable from input space, it will return array(float), not 1d array\n self.xValues[pltIndex][cnt].append(np.atleast_1d(dataSet.isel({'RAVEN_sample_ID': cnt}, False).dropna(pivotParam)[xSplit].values.astype(float, copy=False)))\n maxSize = self.xValues[pltIndex][cnt][-1].size if self.xValues[pltIndex][cnt][-1].size > maxSize else maxSize\n if self.yCoordinates:\n for i in range(len(self.yCoordinates[pltIndex])):\n ySplit = self._returnSplitIndexHS('y', pltIndex, i)\n self.yValues[pltIndex][cnt].append(np.atleast_1d(dataSet.isel({'RAVEN_sample_ID': cnt}, False).dropna(pivotParam)[ySplit].values.astype(float, copy=False)))\n maxSize = self.yValues[pltIndex][cnt][-1].size if self.yValues[pltIndex][cnt][-1].size > maxSize else maxSize\n if self.zCoordinates and self.dim > 2:\n for i in range(len(self.zCoordinates[pltIndex])):\n zSplit = self._returnSplitIndexHS('z', pltIndex, i)\n self.zValues[pltIndex][cnt].append(np.atleast_1d(dataSet.isel({'RAVEN_sample_ID': cnt}, False).dropna(pivotParam)[zSplit].values.astype(float, copy=False)))\n maxSize = self.zValues[pltIndex][cnt][-1].size if self.zValues[pltIndex][cnt][-1].size > maxSize else maxSize\n if self.colorMapCoordinates[pltIndex] is not None:\n for i in range(len(self.colorMapCoordinates[pltIndex])):\n colorSplit = self._returnSplitIndexHS('colorMap', pltIndex, i)\n self.colorMapValues[pltIndex][cnt].append(dataSet.isel({'RAVEN_sample_ID': cnt}, False).dropna(pivotParam)[colorSplit].values.astype(float, copy=False))\n maxSize = self.colorMapValues[pltIndex][cnt][-1].size if self.colorMapValues[pltIndex][cnt][-1].size > maxSize else maxSize\n # expand the 
scalars in case they need to be plotted against histories\n if self.xValues[pltIndex][cnt][-1].size == 1 and maxSize > 1:\n self.xValues[pltIndex][cnt][-1] = np.full(maxSize, self.xValues[pltIndex][cnt][-1])\n if self.yCoordinates and self.yValues[pltIndex][cnt][-1].size == 1 and maxSize > 1:\n self.yValues[pltIndex][cnt][-1] = np.full(maxSize, self.yValues[pltIndex][cnt][-1])\n if self.zCoordinates and self.dim > 2 and self.zValues[pltIndex][cnt][-1].size == 1 and maxSize > 1:\n self.zValues[pltIndex][cnt][-1] = np.full(maxSize, self.zValues[pltIndex][cnt][-1])\n if self.colorMapCoordinates[pltIndex] is not None and self.colorMapValues[pltIndex][cnt][-1].size == 1 and maxSize > 1:\n self.colorMapValues[pltIndex][cnt][-1] = np.full(maxSize, self.colorMapValues[pltIndex][cnt][-1])\n # check if the array sizes are consistent\n if self.yCoordinates and self.yValues[pltIndex][cnt][-1].size != maxSize:\n self.raiseAnError(Exception, f\"<y> variable has a size ({self.yValues[pltIndex][cnt][-1].size}) that is not consistent with input <x> ({sizeToMatch})\")\n if self.zCoordinates and self.dim > 2 and self.zValues[pltIndex][cnt][-1].size != maxSize:\n self.raiseAnError(Exception, f\"<z> variable has a size ({self.zValues[pltIndex][cnt][-1].size}) that is not consistent with input <x> ({sizeToMatch})\")\n if self.colorMapCoordinates[pltIndex] is not None and len(self.colorMapValues[pltIndex][cnt][-1]) != maxSize:\n self.raiseAnError(Exception, f\"<colorMap> variable has a size ({self.colorMapValues[pltIndex][cnt][-1].size}) that is not consistent with input <x> ({sizeToMatch})\")\n\n # check if values have been filled\n if len(self.xValues[pltIndex].keys()) == 0:\n return False\n else:\n for key in self.xValues[pltIndex]:\n if len(self.xValues[pltIndex][key]) == 0:\n return False\n else:\n for i in range(len(self.xValues[pltIndex][key])):\n if self.xValues[pltIndex][key][i].size == 0:\n return False\n if self.yCoordinates:\n if len(self.yValues[pltIndex].keys()) == 0:\n return False\n else:\n for key in self.yValues[pltIndex]:\n if len(self.yValues[pltIndex][key]) == 0:\n return False\n else:\n for i in range(len(self.yValues[pltIndex][key])):\n if self.yValues[pltIndex][key][i].size == 0:\n return False\n if self.zCoordinates and self.dim > 2:\n if len(self.zValues[pltIndex].keys()) == 0:\n return False\n else:\n for key in self.zValues[pltIndex]:\n if len(self.zValues[pltIndex][key]) == 0:\n return False\n else:\n for i in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][i].size == 0:\n return False\n if self.clusterLabels:\n if len(self.clusterValues[pltIndex].keys()) == 0:\n return False\n else:\n for key in self.clusterValues[pltIndex]:\n if len(self.clusterValues[pltIndex][key]) == 0:\n return False\n else:\n for i in range(len(self.clusterValues[pltIndex][key])):\n if self.clusterValues[pltIndex][key][i].size == 0:\n return False\n if self.mixtureLabels:\n if len(self.mixtureValues[pltIndex].keys()) == 0:\n return False\n else:\n for key in self.mixtureValues[pltIndex]:\n if len(self.mixtureValues[pltIndex][key]) == 0:\n return False\n else:\n for i in range(len(self.mixtureValues[pltIndex][key])):\n if self.mixtureValues[pltIndex][key][i].size == 0:\n return False\n if self.colorMapCoordinates[pltIndex] is not None:\n if len(self.colorMapValues[pltIndex].keys()) == 0:\n return False\n else:\n for key in self.colorMapValues[pltIndex]:\n if len(self.colorMapValues[pltIndex][key]) == 0:\n return False\n else:\n for i in range(len(self.colorMapValues[pltIndex][key])):\n 
if self.colorMapValues[pltIndex][key][i].size == 0:\n return False\n\n return True" ]
[ "0.54319865", "0.53537995", "0.53168917", "0.5256393", "0.51762635", "0.51466733", "0.5050392", "0.5049039", "0.5042114", "0.50266486", "0.5022293", "0.5015575", "0.5006958", "0.5006378", "0.4996374", "0.49779284", "0.49577492", "0.4901869", "0.48976383", "0.48949212", "0.48852792", "0.4873969", "0.48704338", "0.4860513", "0.48573405", "0.48531348", "0.48327827", "0.4823309", "0.48223808", "0.48200297" ]
0.6360529
0
init config, batch_size, epochs.
def __init__(self, config):
    self.model = None
    self.config = config
    self.batch_size = config.get('batch_size')
    self.epochs = config.get('epochs')
    self.steps_per_epoch = config.get('steps_per_epoch')
    self.validation_steps = config.get('validation_steps')
    self.distributed = config.get('distributed', False)
    # init model
    self.init()
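A self-contained sketch of how this constructor is typically driven; the BaseTrainer class name and the no-op init() hook are assumptions, while the config keys simply mirror the config.get() calls above.

# Sketch only: BaseTrainer and its init() hook are assumed stand-ins; a real
# subclass would build self.model inside init(). The keys mirror the
# config.get() calls in the constructor shown above.
class BaseTrainer:
    def __init__(self, config):
        self.model = None
        self.config = config
        self.batch_size = config.get('batch_size')
        self.epochs = config.get('epochs')
        self.steps_per_epoch = config.get('steps_per_epoch')
        self.validation_steps = config.get('validation_steps')
        self.distributed = config.get('distributed', False)
        self.init()

    def init(self):
        pass  # subclasses are expected to construct self.model here

trainer = BaseTrainer({'batch_size': 32, 'epochs': 10,
                       'steps_per_epoch': 100, 'validation_steps': 25})
assert trainer.batch_size == 32 and trainer.distributed is False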
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, classCount):\n self.NUM_CLASSES = 1+classCount\n self.STEPS_PER_EPOCH = self.STEPS_PER_EPOCH / self.IMAGES_PER_GPU\n self.VALIDATION_STEPS = self.VALIDATION_STEPS / self.IMAGES_PER_GPU\n super(ModelConfig, self).__init__()", "def init_input_pipeline(self, config):\n\n ######################\n # Calibrate parameters\n ######################\n\n print('Initiating input pipelines')\n\n # Update num classes in config\n config.num_classes = self.num_classes - len(self.ignored_labels)\n config.ignored_label_inds = [self.label_to_idx[ign_label] for ign_label in self.ignored_labels]\n\n print('ignored_label_inds:')\n print(config.ignored_label_inds)\n\n # Update network model in config\n config.network_model = self.network_model\n\n print('network_model:')\n print(config.network_model)\n\n # Calibrate generators to batch_num\n print('Calibrate generators to batch_num')\n self.batch_limit = self.calibrate_batches(config)\n\n # From config parameter, compute higher bound of neighbors number in a neighborhood\n hist_n = int(np.ceil(4 / 3 * np.pi * (config.density_parameter + 1) ** 3))\n\n # Initiate neighbors limit with higher bound\n print('Initiate neighbors limit with higher bound')\n self.neighborhood_limits = np.full(config.num_layers, hist_n, dtype=np.int32)\n\n # Calibrate max neighbors number\n print('Calibrate max neighbors number')\n self.calibrate_neighbors(config)\n\n ################################\n # Initiate tensorflow parameters\n ################################\n\n # Reset graph\n print('Reset graph')\n tf.reset_default_graph()\n\n # Set random seed (You also have to set it in network_architectures.weight_variable)\n #np.random.seed(42)\n #tf.set_random_seed(42)\n\n # Get generator and mapping function\n print('Get generator')\n gen_function, gen_types, gen_shapes = self.get_batch_gen('training', config)\n gen_function_val, _, _ = self.get_batch_gen('validation', config)\n print('Get mapping function')\n map_func = self.get_tf_mapping(config)\n\n ##################\n # Training dataset\n ##################\n\n # Create batched dataset from generator\n self.train_data = tf.data.Dataset.from_generator(gen_function,\n gen_types,\n gen_shapes)\n\n self.train_data = self.train_data.map(map_func=map_func, num_parallel_calls=self.num_threads)\n\n # Prefetch data\n self.train_data = self.train_data.prefetch(10)\n\n ##############\n # Test dataset\n ##############\n\n # Create batched dataset from generator\n self.val_data = tf.data.Dataset.from_generator(gen_function_val,\n gen_types,\n gen_shapes)\n\n # Transform inputs\n self.val_data = self.val_data.map(map_func=map_func, num_parallel_calls=self.num_threads)\n\n # Prefetch data\n self.val_data = self.val_data.prefetch(10)\n\n #################\n # Common iterator\n #################\n\n # create a iterator of the correct shape and type\n iter = tf.data.Iterator.from_structure(self.train_data.output_types, self.train_data.output_shapes)\n self.flat_inputs = iter.get_next()\n\n # create the initialisation operations\n self.train_init_op = iter.make_initializer(self.train_data)\n self.val_init_op = iter.make_initializer(self.val_data)", "def init_batch(self):\n pass", "def configure_steps(\n self,\n config: ConfigDict,\n len_train: int,\n len_test: int,\n ):\n # Set required defaults if not present\n if \"batch_size\" not in config:\n batch_size = 2 * jax.device_count()\n else:\n batch_size = config[\"batch_size\"]\n if \"num_epochs\" not in config:\n num_epochs = 10\n else:\n num_epochs = config[\"num_epochs\"]\n\n 
# Determine sharded vs. batch partition\n if batch_size % jax.device_count() > 0:\n raise ValueError(\"Batch size must be divisible by the number of devices\")\n self.local_batch_size: int = batch_size // jax.process_count()\n\n # Training steps\n self.steps_per_epoch: int = len_train // batch_size\n config[\"steps_per_epoch\"] = self.steps_per_epoch # needed for creating lr schedule\n self.num_steps: int = int(self.steps_per_epoch * num_epochs)\n\n # Evaluation (over testing set) steps\n num_validation_examples: int = len_test\n if \"steps_per_eval\" not in config:\n self.steps_per_eval: int = num_validation_examples // batch_size\n else:\n self.steps_per_eval = config[\"steps_per_eval\"]\n\n # Determine monitoring steps\n if \"steps_per_checkpoint\" not in config:\n self.steps_per_checkpoint: int = self.steps_per_epoch * 10\n else:\n self.steps_per_checkpoint = config[\"steps_per_checkpoint\"]\n\n if \"log_every_steps\" not in config:\n self.log_every_steps: int = self.steps_per_epoch * 20\n else:\n self.log_every_steps = config[\"log_every_steps\"]", "def __init__(self):\n self.num_examples_per_epoch = 99999\n self.optimizer = \"Adam\"\n # Learning rate for the initial phase of training.\n self.initial_learning_rate = 0.0001\n self.learning_rate_decay_factor = 0.5\n self.num_epochs_per_decay = 8.0\n\n # Learning rate when fine tuning the Inception v3 parameters.\n self.train_inception_learning_rate = 0.0001\n\n # If not None, clip gradients to this value.\n self.clip_gradients = 5.0\n\n # How many model checkpoints to keep.\n self.max_checkpoints_to_keep = 5000", "def __init__(self, epochs, **kwargs):\n if epochs == -1:\n self.layer_ls = []\n self.thick_ls = []\n self.max_ret_ls = []\n self.layer_ls = [0] * epochs\n self.thick_ls = [0] * epochs\n self.max_ret_ls = [0] * epochs\n self.kwargs = kwargs\n self.current_e = 0", "def configure(self):\n self.data_batch_file = self.get_value_from_config('data_batch_file')\n self.batch_meta_file = self.get_value_from_config('batch_meta_file')\n self.has_background = self.get_value_from_config('has_background')\n self.num_classes = self.get_value_from_config('num_classes')\n self.converted_images_dir = self.get_value_from_config('converted_images_dir')\n if not self.converted_images_dir:\n self.converted_images_dir = self.data_batch_file.parent / 'converted_images'\n self.convert_images = self.get_value_from_config('convert_images')\n # create directory for storing images if it is necessary\n if self.convert_images and not self.converted_images_dir.exists():\n self.converted_images_dir.mkdir(parents=True)\n self.dataset_meta = self.get_value_from_config('dataset_meta_file')", "def __init__(self, epochs, learning_rate):\n\n self.epochs = epochs\n self.learning_rate = learning_rate", "def __init__(self, dataset, batch_size, epochs=None, rnd=None):\n self.dataset = dataset\n self.batch_size = batch_size\n self.epochs = epochs\n self.T = int(np.ceil(dataset.num_examples / batch_size))\n if self.epochs:\n self.T *= self.epochs\n\n self.rnd = get_rand_state(rnd)\n\n self.training_schedule = None\n self.iter_per_epoch = int(dataset.num_examples / batch_size)", "def __init__(self, config_path):\n cfg = Config.fromfile(config_path)\n self.cfg = cfg\n\n # Now make the dataloader\n self.dataset = build_dataset(cfg.data.test)\n\n self.loader = build_dataloader(\n self.dataset,\n imgs_per_gpu=1,\n workers_per_gpu=0,\n dist=False,\n shuffle=False\n )", "def __init__(self, path, epochs, batch_size):\n\n\t\tX_train, X_val, Y_train, Y_val = 
self._load_dataset(path)\n\n\t\tmodel = self._Resnet50(input_shape = (64, 64, 3), classes = 10)\n\t\tmodel.summary()\n\t\tcheckpointer = ModelCheckpoint(filepath=\"./data/model.h5\", verbose=0, save_best_only=True)\n\t\ttensorboard = TensorBoard(log_dir='data/./logs', histogram_freq=0, write_graph=True, write_images=True)\n\t\tmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\t\thistory = model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size,shuffle=True, \n\t\tvalidation_data=(X_val, Y_val), verbose=1, callbacks=[checkpointer, tensorboard]).history", "def __init__(self, config_path):\n\n with open(config_path) as f:\n config_dict = yaml.load(f, Loader=yaml.FullLoader)\n\n # General\n self.config_path = config_path\n\n # Network parameters\n self.net_type = config_dict[\"NET\"][\"NET_TYPE\"]\n\n # Train parameters\n self.train_dataset_path = config_dict[\"TRAIN\"][\"TRAIN_DATASET_PATH\"]\n self.train_label_path = config_dict[\"TRAIN\"][\"TRAIN_LABEL_PATH\"]\n self.save_path = config_dict[\"TRAIN\"][\"SAVE_PATH\"]\n self.n_epoch = config_dict[\"TRAIN\"][\"N_EPOCH\"]\n self.lr = config_dict[\"TRAIN\"][\"LR\"]\n self.loss_period = config_dict[\"TRAIN\"][\"LOSS_PERIOD\"]\n self.mini_batch_size = config_dict[\"TRAIN\"][\"MINI_BATCH_SIZE\"]\n self.saving_epoch = config_dict[\"TRAIN\"][\"SAVING_EPOCH\"]\n self.load_checkpoint = bool(config_dict[\"TRAIN\"][\"LOAD_CHECKPOINT\"])\n\n # Validation parameters\n self.use_validation = bool(config_dict[\"VALIDATION\"][\"USE_VALIDATION\"])\n self.validation_period = config_dict[\"VALIDATION\"][\"VALIDATION_PERIOD\"]\n\n # Test parameters\n self.test_dataset_path = config_dict[\"TEST\"][\"TEST_DATASET_PATH\"]\n self.test_label_path = config_dict[\"TEST\"][\"TEST_LABEL_PATH\"]\n self.weight_path = config_dict[\"TEST\"][\"WEIGHT_PATH\"]\n\n # Mlflow\n self.log_weights = bool(config_dict[\"MLFLOW\"][\"LOG_WEIGHTS\"])\n\n # Packaging\n self.mlflow_pyfunc_model_path = config_dict[\"PACKAGING\"][\"MLFLOW_PYFUNC_MODEL_PATH\"]", "def __init__(self):\n # Number of examples per epoch of training data.\n self.num_examples_per_epoch = None \n\n # Optimizer for training the model.\n self.optimizer = \"SGD\" #default \"SGD\"\n\n # Learning rate for the initial phase of training.\n self.initial_learning_rate = 2.0 # default 2.0\n self.learning_rate_decay_factor = 0.8\n self.num_epochs_per_decay = 4 #default 8\n\n # If not None, clip gradients to this value.\n self.clip_gradients = 5.0\n\n # How many model checkpoints to keep.\n self.max_checkpoints_to_keep = 2", "def get_test_config():\n config = get_config()\n config.batch_size = 2\n config.eval_batch_size = 2\n config.eval_num = 2\n config.eval_avg_num = 1\n config.num_train_steps = 2\n config.log_loss_every_steps = 1\n config.eval_every_steps = 1\n config.checkpoint_every_steps = 1\n config.df_dim = 16\n config.gf_dim = 16\n config.z_dim = 8\n config.show_num = 4\n config.num_epochs = 1\n config.shuffle_buffer_size = 10\n return config", "def __init__(self, batch_size=1, epochs=None, learning_rate=None, momentum=None, weights_name=''):\n self.batch_size = batch_size\n self.epochs = epochs\n self.model = None\n self.optimizer = None\n self.cb = None\n self.lr = learning_rate\n self.momentum = momentum\n self.weights_name = weights_name", "def __init__(self, batch_size=32, dim=(32,32), n_channels=3, n_classes=2, shuffle=True, debugMode=False):\n self.debugMode = debugMode\n self.generateLabelsAndIds()\n self.trainingGenerator = DataGeneratorClass(\n self.ids[\"train\"], 
self.labels, self.address,\n batch_size, dim, n_channels, n_classes, shuffle\n )\n self.validationGenerator = DataGeneratorClass(\n self.ids[\"validation\"], self.labels, self.address,\n batch_size, dim, n_channels, n_classes, shuffle\n )", "def train(self):\n for e in range(self.train_config['nb_epochs']):\n self.on_epoch(e)\n\n with open(os.path.join(self.exp_path, 'config.yml'), 'w') as outfile:\n yaml.dump(self.config, outfile, default_flow_style=False)", "def set_training_parameters(\n self,\n config: ConfigDict,\n len_train: int,\n len_test: int,\n ):\n self.configure_steps(config, len_train, len_test)\n self.configure_reporting(config)\n self.configure_training_functions(config)", "def __init__(self, \n training_epochs):\n self.training_epochs = training_epochs", "def __init__(self, nb_epochs, run=\"run\", verbose=True):\n self.nb_epochs = nb_epochs\n self.verbose = verbose", "def init_test_input_pipeline(self, config):\n\n print('Initiating test input pipelines')\n\n ######################\n # Calibrate parameters\n ######################\n\n # Update num classes in config\n config.num_classes = self.num_classes - len(self.ignored_labels)\n config.ignored_label_inds = [self.label_to_idx[ign_label] for ign_label in self.ignored_labels]\n\n # Update network model in config\n config.network_model = self.network_model\n\n # Update num classes in config\n\n if config.network_model == 'multi_segmentation':\n config.num_classes = self.num_parts\n elif config.network_model == 'segmentation':\n if self.ShapeNetPartType in self.label_names:\n config.num_classes = self.num_parts[self.name_to_label[self.ShapeNetPartType]]\n else:\n raise ValueError('Wrong object name given for ShapeNetPart single object segmentation')\n\n # Calibrate generators to batch_num\n self.batch_limit = self.calibrate_batches(config)\n\n # From config parameter, compute higher bound of neighbors number in a neighborhood\n hist_n = int(np.ceil(4 / 3 * np.pi * (config.density_parameter + 1) ** 3))\n\n # Initiate neighbors limit with higher bound\n self.neighborhood_limits = np.full(config.num_layers, hist_n, dtype=np.int32)\n\n # Calibrate max neighbors number\n self.calibrate_neighbors(config)\n\n ################################\n # Initiate tensorflow parameters\n ################################\n\n # Reset graph\n tf.reset_default_graph()\n\n # Set random seed (You also have to set it in network_architectures.weight_variable)\n #np.random.seed(42)\n #tf.set_random_seed(42)\n\n # Get generator and mapping function\n gen_function, gen_types, gen_shapes = self.get_batch_gen('test', config)\n map_func = self.get_tf_mapping(config)\n\n ##############\n # Test dataset\n ##############\n\n # Create batched dataset from generator\n self.test_data = tf.data.Dataset.from_generator(gen_function,\n gen_types,\n gen_shapes)\n\n self.test_data = self.test_data.map(map_func=map_func, num_parallel_calls=self.num_threads)\n\n # Prefetch data\n self.test_data = self.test_data.prefetch(10)\n\n #################\n # Common iterator\n #################\n\n # create a iterator of the correct shape and type\n iter = tf.data.Iterator.from_structure(self.test_data.output_types, self.test_data.output_shapes)\n self.flat_inputs = iter.get_next()\n\n # create the initialisation operations\n self.test_init_op = iter.make_initializer(self.test_data)", "def __init__(self, config, data_loader, layer_hyperparams):\n self.config = config\n self.layer_hyperparams = layer_hyperparams\n\n if config.is_train:\n self.train_loader = data_loader[0]\n 
self.valid_loader = data_loader[1]\n self.num_train = len(self.train_loader.dataset)\n self.num_valid = self.valid_loader.dataset.trials\n else:\n if config.get_embedding:\n self.test_embedding_loader = data_loader\n self.n_embeddings = config.n_embeddings\n else:\n self.test_loader = data_loader\n self.num_test = self.test_loader.dataset.trials\n\n if config.use_batch_norm:\n self.model = SiameseNetWithBN()\n else:\n self.model = SiameseNet()\n \n if config.use_gpu:\n self.model.cuda()\n\n # model params\n self.num_params = sum(\n [p.data.nelement() for p in self.model.parameters()]\n )\n self.num_model = get_num_model(config)\n self.num_layers = len(list(self.model.children()))\n\n print('[*] Number of model parameters: {:,}'.format(self.num_params))\n\n # path params\n self.ckpt_dir = os.path.join(config.ckpt_dir, self.num_model)\n self.logs_dir = os.path.join(config.logs_dir, self.num_model)\n\n # misc params\n self.resume = config.resume\n self.use_gpu = config.use_gpu\n self.dtype = (\n torch.cuda.FloatTensor if self.use_gpu else torch.FloatTensor\n )\n\n # optimization params\n self.best = config.best\n self.best_valid_acc = 0.\n self.epochs = config.epochs\n self.start_epoch = 0\n self.lr_patience = config.lr_patience\n self.train_patience = config.train_patience\n self.counter = 0\n\n # grab layer-wise hyperparams\n self.init_lrs = self.layer_hyperparams['layer_init_lrs']\n self.init_momentums = [config.init_momentum]*self.num_layers\n self.end_momentums = self.layer_hyperparams['layer_end_momentums']\n self.l2_regs = self.layer_hyperparams['layer_l2_regs']\n\n # compute temper rate for momentum\n if self.epochs == 1:\n f = lambda max, min: min\n else:\n f = lambda max, min: (max - min) / (self.epochs-1)\n self.momentum_temper_rates = [\n f(x, y) for x, y in zip(self.end_momentums, self.init_momentums)\n ]\n\n # set global learning rates and momentums\n self.lrs = self.init_lrs\n self.momentums = self.init_momentums\n\n # # initialize optimizer\n # optim_dict = []\n # for i, layer in enumerate(self.model.children()):\n # group = {}\n # group['params'] = layer.parameters()\n # group['lr'] = self.lrs[i]\n # group['momentum'] = self.momentums[i]\n # group['weight_decay'] = self.l2_regs[i]\n # optim_dict.append(group)\n # self.optimizer = optim.SGD(optim_dict)\n # self.optimizer = optim.SGD(\n # self.model.parameters(), lr=1e-3, momentum=0.9, weight_decay=4e-4,\n # )\n self.optimizer = optim.Adam(\n self.model.parameters(), lr=3e-4, weight_decay=6e-5,\n )\n\n # # learning rate scheduler\n # self.scheduler = StepLR(\n # self.optimizer, step_size=self.lr_patience, gamma=0.99,\n # )\n self.debug = dict()", "def __init__(self, data_dir: Path, config: Config):\n self.device = torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\"\n )\n\n training_path_list, ground_truth_path_list = get_file_paths(data_dir)\n\n X_train, X_test, y_train, y_test = self.train_test_split(\n training_path_list,\n ground_truth_path_list,\n test_portion=config.val_split,\n )\n\n train_dataset = TrainDataset(\n config, X_train, y_train, random_augmentation=True\n )\n val_dataset = TrainDataset(\n config, X_test, y_test, random_augmentation=False\n )\n\n self.train_loader = DataLoader(\n train_dataset,\n batch_size=config.train_batch_size,\n shuffle=True,\n pin_memory=True,\n )\n self.val_loader = DataLoader(\n val_dataset,\n batch_size=config.test_batch_size,\n # No shuffle as it won't make any difference\n pin_memory=True,\n )\n\n model = UNet(INPUT_CHANNELS, OUTPUT_CHANNELS, config)\n self.model = 
DataParallel(model).to(self.device)\n\n if config.loss == \"logit_bce\":\n loss_weight = (\n self._get_loss_weight() if config.balanced_loss else None\n )\n # Using logits directly is numerically more stable and efficient\n self.class_loss_fn = BCEWithLogitsLoss(pos_weight=loss_weight)\n elif config.loss == \"soft_dice\":\n self.class_loss_fn = soft_dice_loss\n\n self.texture_transform = get_texture_transform(config)\n self.shape_loss_fn = ContrastiveLoss(config.temperature)\n\n self.optim = Adam(\n self.model.parameters(),\n lr=config.learn_rate,\n weight_decay=config.weight_decay,\n )\n max_steps = config.epochs * len(self.train_loader)\n self.scheduler = OneCycleLR(\n self.optim,\n max_lr=config.max_learn_rate,\n total_steps=max_steps,\n )\n self.scaler = GradScaler(enabled=config.mixed_precision)\n\n # Used when dumping hyper-params to a file\n self.config = config\n\n # To store best acc achieved so far\n self.best_acc = 0.0", "def __init__(self, conf: DictConfig) -> None:\n self.seed = self.init_seed_cudnn(conf)\n self.device = self.setup_device(conf)\n self.epochs = conf.runner.epochs if 'epochs' in conf.runner else 0\n\n self.progress, self.print_to_term, self.log_mlflow = self.init_output_options(conf)\n\n self.module = self.setup_module(conf)\n\n self.val_acc = float('-inf')\n self.do_val = bool(conf.runner.val)\n\n self.test_every_epoch = conf.runner.test_every_epoch if 'test_every_epoch' in conf.runner else False\n self.dry_run = conf.runner.dry_run if 'dry_run' in conf.runner else False\n if self.dry_run:\n self.epochs = 1\n\n self.config = conf\n self.e = 0 # Set for linter, updated over multiple iterations", "def _parse_config(self, args, experiment_id):\r\n if not args:\r\n if experiment_id:\r\n config = importlib.import_module('configs.config_' + experiment_id)\r\n args = config.load_config()\r\n else:\r\n raise ValueError('No arguments or configuration data given')\r\n # Mandatory parameters for all architectures\r\n self.network_type = args.net\r\n self.is_training = args.training_mode\r\n self.train_data_file = args.train_data_file\r\n self.valid_data_file = args.valid_data_file\r\n self.test_data_file = args.test_data_file\r\n self.checkpoint_dir = args.checkpoint_dir\r\n self.trainlog_dir = args.trainlog_dir\r\n self.lr = args.lr\r\n self.batch_size = args.batch_size\r\n self.num_epochs = args.num_epochs\r\n self.loss_type = args.loss\r\n self.accuracy_type = args.accuracy\r\n self.optimizer = args.optimizer\r\n self.dropout = args.dropout\r\n self.gpu_load = args.gpu_load\r\n self.num_filters = args.num_filters\r\n self.nonlin = args.nonlin\r\n self.loss_type = args.loss\r\n self.task_type = args.task_type\r\n self.long_summary = args.long_summary\r\n self.experiment_path = args.experiment_path\r\n self.chpnt2load = args.chpnt2load\r\n self.lr_mode = args.lr_mode\r\n\r\n if not self.is_training:\r\n self.class_labels = args.class_labels\r\n if args.image_size:\r\n self.img_size = args.image_size\r\n else:\r\n self.img_size = None\r\n if args.num_classes:\r\n self.num_classes = args.num_classes\r\n else:\r\n self.num_classes = None\r\n if args.augmentation:\r\n self.augmentation_dict = args.augmentation\r\n else:\r\n self.augmentation_dict = None\r\n if args.normalize:\r\n self.normalize = args.normalize\r\n else:\r\n self.normalize = None\r\n if args.zero_center:\r\n self.zero_center = args.zero_center\r\n else:\r\n self.zero_center = None\r\n\r\n\r\n self._initialize_data()", "def setUp(self):\n self._default_call_inputs = (\n np.array([[1,2,3], [4,5,6]]),\n None\n 
)\n\n self._hash_bins = 10\n self._hash_embedding_dim = 4\n self._embedding_dim = 2\n self._masking = False\n\n self._default_config = {\n \"hash_bins\": self._hash_bins,\n \"hash_embedding_dim\": self._hash_embedding_dim,\n \"embedding_dim\": self._embedding_dim,\n \"masking\": self._masking\n }", "def init():\n ########################\n # OPTIONS\n ########################\n # Debugging tools\n global TIMER # displays time of every major step\n TIMER = True\n global MONITOR # displays monitoring infos\n MONITOR = False\n \n global directories\n directories = {'1Face': 'data/1Face/',\n '2Faces': 'data/2Faces/',\n '3Faces': 'data/3Faces/',\n 'test': 'data/test/'}\n \n # Opt. swicthes\n global maxfinder # to find the max dim. amongst the pictures\n maxfinder = False\n global ML_mode\n ML_mode = {'CNN_Train': False,\n 'CNN_Pred' : True,\n 'Sampler': True}\n \n # Global variables\n global num_pics\n num_pics = {'1Face': 0,\n '2Faces': 0,\n '3Faces': 0}\n global labels\n labels = {'1Face': 0,\n '2Faces': 1,\n '3Faces': 2}\n global num_data\n num_data = 0\n global splitsize # Fraction of data to build the training set\n splitsize = 0.7 \n global maxheight # Resize the pictures to a power of 2 for CNN (2^8 here)\n maxheight = 128\n global maxwidth\n maxwidth = 128\n global TreshEdge # Number of consecutive black pixels to define an edge\n TreshEdge = 2\n global TreshFace # Number of white pixels to define a face (or large edge)\n TreshFace = maxheight/16", "def config_and_train(self, sys_args):\n \n self.run_config_function(sys_args)\n self.set_model_name('vgg_16')\n self.set_trainable_and_exclude_scopes(constants.checkpoint_exclude_scopes,\n constants.trainable_scopes)\n self.set_optimizer('sgd')\n self.set_max_number_of_steps(6000)\n self.train_or_eval_net(sys_args)", "def init_train(self):\n data = self.loader.load_labelled_data(self.conf.split, 'training')\n\n # Initialise unlabelled data iterator\n num_ul = 0\n if self.conf.ul_mix > 0:\n ul_data = self.loader.load_unlabelled_data(self.conf.split, 'all')\n\n # calculate number of unlabelled images as a proportion of the labelled images\n num_ul = int(data.size() * self.conf.ul_mix)\n num_ul = num_ul if num_ul <= ul_data.size() else ul_data.size()\n log.info('Sampling %d unlabelled images out of total %d.' % (num_ul, ul_data.size()))\n ul_data.sample(num_ul)\n self.gen_X_U = data_utils.generator(self.conf.batch_size, 'overflow', ul_data.images)\n\n # Initialise labelled data iterator\n assert self.conf.l_mix >= 0\n\n # calculate number of labelled images\n num_l = int(data.size() * self.conf.l_mix)\n num_l = num_l if num_l <= data.size() else data.size()\n log.info('Using %d labelled images out of total %d.' 
% (num_l, data.size()))\n train_images = data.images[:num_l]\n train_masks = data.masks[:num_l]\n\n self.conf.unlabelled_image_num = num_ul\n self.conf.labelled_image_num = num_l\n self.conf.data_len = num_ul if num_ul > num_l else num_l\n self.conf.batches = int(np.ceil(self.conf.data_len / self.conf.batch_size))\n self.conf.save()\n\n self.gen_X_L = data_utils.generator(self.conf.batch_size, 'overflow', train_images, train_masks)\n\n # Initialise real masks iterator for discriminator training, using the real masks from the data CV split.\n self.other_masks = data_utils.generator(self.conf.batch_size, 'overflow', data.masks + 0)", "def __init__(self, config, tdim, batch_size):\n self.float_type = 'float32' # This should be the default\n self.config = config\n self.dt = self.config['dt']\n\n self.n_eachring = self.config['n_eachring']\n self.n_input = self.config['n_input']\n self.n_output = self.config['n_output']\n self.pref = np.arange(0,2*np.pi,2*np.pi/self.n_eachring) # preferences\n\n self.batch_size = batch_size\n self.tdim = tdim\n self.x = np.zeros((tdim, batch_size, self.n_input), dtype=self.float_type)\n self.y = np.zeros((tdim, batch_size, self.n_output), dtype=self.float_type)\n if self.config['loss_type'] == 'lsq':\n self.y[:,:,:] = 0.05\n # y_loc is the stimulus location of the output, -1 for fixation, (0,2 pi) for response\n self.y_loc = -np.ones((tdim, batch_size) , dtype=self.float_type)\n\n self._sigma_x = config['sigma_x']*np.sqrt(2/config['alpha'])" ]
[ "0.7023804", "0.6994861", "0.6986887", "0.69548535", "0.6946242", "0.69346786", "0.68955725", "0.68943435", "0.6829831", "0.6821801", "0.6818673", "0.6742996", "0.67338043", "0.6723518", "0.6714932", "0.6707389", "0.6679911", "0.6676357", "0.66759074", "0.6658294", "0.6651317", "0.6645307", "0.66348606", "0.6632388", "0.66290665", "0.66171396", "0.6611108", "0.6609005", "0.65896004", "0.658536" ]
0.78860265
0
You should implements inputs.
def inputs(self): return NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def input(self):", "def inputs(self):\n pass", "def process_inputs(self, inputs):", "def processInputs(self):", "def input(self):\r\n pass", "def __call__(self, *inputs):\n raise NotImplementedError", "def call(self, inputs):\n raise NotImplementedError", "def out(self, inputs):", "def __call__(self, inputs, states, **kwargs):\n raise NotImplementedError()", "def get_inputs(self):\r\n raise NotImplementedError", "def get_inputs(self):\r\n raise NotImplementedError", "def n_inputs(self):", "def d_input(self):\n pass", "def investigate(self, inputs):\r\n # Optional behavior\r\n return inputs", "def inputs(self):\n return super().inputs", "def inputs(self):\n return super().inputs", "def inputs(self):\n return super().inputs", "def inputs(self):\n return super().inputs", "def _TransformInputs(self, _):\n raise NotImplementedError()", "def apply(self, inputs):\n raise NotImplementedError()", "def forward(self, *inputs):\n raise NotImplementedError", "def call(self, inputs, state):\n raise NotImplementedError", "def forward(self, inputs):\n raise NotImplementedError", "def __init__(self):\n self.inputs = {}", "def call(self, inputs, **kwargs): # pylint: disable=unused-argument\n return inputs", "def get_input(self):\n pass", "def _expected_inputs():", "def __set_inputs__(self):\n self.__set_in_out_var__(None, 0) # TODO: inspect None", "def input(self, inputters, field, number=None):\n raise NotImplementedError", "def update(self, inputs): # pragma: no cover\n return inputs" ]
[ "0.8273351", "0.8123075", "0.8098877", "0.79928917", "0.7894954", "0.7837142", "0.76479155", "0.751315", "0.7328494", "0.73143715", "0.73143715", "0.7313171", "0.72914857", "0.7180601", "0.7165914", "0.7165914", "0.7165914", "0.7165914", "0.7058984", "0.7046415", "0.70070934", "0.6974683", "0.69640446", "0.6840195", "0.6810292", "0.68036807", "0.68015194", "0.6739522", "0.673081", "0.6704662" ]
0.82329375
1
Build optimizer, default to sgd.
def optimizer(self): return 'sgd'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compile(self, gen_optimizer, disc_optimizer):\n self.gen_optimizer = gen_optimizer\n self.disc_optimizer = disc_optimizer", "def compile_optimizer(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.cfg.learning_rate)\n\n return optimizer", "def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:\n norm_module_types = (\n torch.nn.BatchNorm1d,\n torch.nn.BatchNorm2d,\n torch.nn.BatchNorm3d,\n torch.nn.SyncBatchNorm,\n # NaiveSyncBatchNorm inherits from BatchNorm2d\n torch.nn.GroupNorm,\n torch.nn.InstanceNorm1d,\n torch.nn.InstanceNorm2d,\n torch.nn.InstanceNorm3d,\n torch.nn.LayerNorm,\n torch.nn.LocalResponseNorm,\n )\n params: List[Dict[str, Any]] = []\n memo: Set[torch.nn.parameter.Parameter] = set()\n for module in model.modules():\n for key, value in module.named_parameters(recurse=False):\n if not value.requires_grad:\n continue\n # Avoid duplicating parameters\n if value in memo:\n continue\n memo.add(value)\n lr = cfg.SOLVER.BASE_LR\n weight_decay = cfg.SOLVER.WEIGHT_DECAY\n if isinstance(module, norm_module_types):\n weight_decay = cfg.SOLVER.WEIGHT_DECAY_NORM\n elif key == \"bias\":\n # NOTE: unlike Detectron v1, we now default BIAS_LR_FACTOR to 1.0\n # and WEIGHT_DECAY_BIAS to WEIGHT_DECAY so that bias optimizer\n # hyperparameters are by default exactly the same as for regular\n # weights.\n lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR\n weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS\n params += [{\"params\": [value], \"lr\": lr, \"weight_decay\": weight_decay}]\n\n optimizer = torch.optim.SGD(params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM)\n return optimizer", "def build_optimizer(opt_config, learning_rate):\n if opt_config.opt_method == 'SGD':\n print('Using SGD as the optimizer', file=sys.stderr)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n elif opt_config.opt_method == 'Adam':\n print('Using Adam as the optimizer', file=sys.stderr)\n optimizer = tf.train.AdamOptimizer(\n learning_rate, beta1=opt_config.adam_beta1,\n beta2=opt_config.adam_beta2, epsilon=opt_config.adam_epsilon\n )\n else:\n raise ValueError(\n 'Unknown optimization method {0}!'.format(opt_config.opt_method))\n return optimizer", "def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer, tpu_support=False):\n self.optimize_ops = []\n for loss in self.losses['train']: # TODO Create apropoiate external training scheme\n optimize_op = optimizer_to_use(\n learning_rate=self.learning_rate\n )\n if tpu_support:\n optimize_op = tpu.CrossShardOptimizer(optimize_op)\n optimize_op = optimize_op.minimize(\n loss=loss,\n global_step=tf.train.get_global_step()\n )\n self.optimize_ops.append(optimize_op)\n logging.info('Optimizers built')", "def make_optimizer(self):\r\n # parameters = [self.encoder.parameters(), self.decoder.parameters(), self.spec_enc.parameters()]\r\n if self.flags.optim == 'Adam':\r\n op = torch.optim.Adam(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)\r\n elif self.flags.optim == 'RMSprop':\r\n op = torch.optim.RMSprop(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)\r\n elif self.flags.optim == 'SGD':\r\n op = torch.optim.SGD(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)\r\n else:\r\n raise Exception(\"Your Optimizer is neither Adam, RMSprop or SGD, please change in param or contact Ben\")\r\n return op", "def optimizer_creator(model, config):\n return torch.optim.SGD(model.parameters(), lr=config.get(\"lr\", 1e-4))", 
"def build_model(self):\n self.g12 = G12(conv_dim=self.g_conv_dim)\n init_weights(self.g12, init_type='normal')\n self.g21 = G21(conv_dim=self.g_conv_dim)\n init_weights(self.g21, init_type='normal')\n self.d1 = D1(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d1, init_type='normal')\n self.d2 = D2(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d2, init_type='normal')\n self.dreid = DSiamese(class_count=self.num_classes_market)\n\n g_params = list(self.g12.parameters()) + list(self.g21.parameters())\n d_params = list(self.d1.parameters()) + list(self.d2.parameters())\n dr_params = list(self.dreid.parameters())\n\n self.g_optimizer = optim.Adam(g_params, self.lr, [self.beta1, self.beta2])\n self.d_optimizer = optim.Adam(d_params, self.lr, [self.beta1, self.beta2])\n self.dr_optimizer = optim.Adam(dr_params, self.lr, [self.beta1, self.beta2])\n\n if torch.cuda.is_available():\n self.g12.cuda()\n self.g21.cuda()\n self.d1.cuda()\n self.d2.cuda()\n self.dreid.cuda()", "def build_model(self):\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\n if self.config.optimizer == 'sgd':\n self.optimizer = tf.keras.optimizers.SGD(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'rms':\n self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adam':\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adagrad':\n self.optimizer = tf.keras.optimizers.Adagrad(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adadelta':\n self.optimizer = tf.keras.optimizers.Adadelta(learning_rate=self.config.learning_rate)\n else:\n raise NotImplementedError(\"No support for %s optimizer\" % self.config.optimizer)\n \n if self.config.optimizer in ['rms', 'adagrad', 'adadelta']:\n with tf.device('cpu:0'):\n self.model.def_parameters()\n else:\n self.model.def_parameters()\n\n self.config.summary()\n self.config.summary_hyperparameter(self.model.model_name)", "def build_optimizer(optimizer_config, params, name=None):\n\n if optimizer_config.name == 'rms_prop_optimizer':\n\n optimizer = paddle.optimizer.RMSProp(\n parameters = params,\n learning_rate=_get_base_lr_by_lr_scheduler(optimizer_config.learning_rate),\n rho=optimizer_config.decay,\n momentum=optimizer_config.momentum_optimizer_value,\n epsilon=optimizer_config.epsilon,\n weight_decay=optimizer_config.weight_decay)\n\n if optimizer_config.name =='momentum_optimizer':\n\n optimizer = paddle.optimizer.SGD(\n parameters = params,\n learning_rate=_get_base_lr_by_lr_scheduler(optimizer_config.learning_rate),\n weight_decay=optimizer_config.weight_decay)\n\n if optimizer_config.name =='adam_optimizer':\n\n optimizer = paddle.optimizer.Adam(\n parameters = params,\n learning_rate=_get_base_lr_by_lr_scheduler(optimizer_config.learning_rate),\n weight_decay=optimizer_config.weight_decay)\n\n if optimizer is None:\n raise ValueError('Optimizer %s not supported.' 
% optimizer_config.name)\n\n if optimizer_config.use_moving_average:\n raise ValueError('paddle don\\'t support moving average')\n if name is None:\n # assign a name to optimizer for checkpoint system\n optimizer.name = optimizer_config.name\n else:\n optimizer.name = name\n return optimizer", "def create_optimizer(self, context, optimizer, host):\n pass", "def initialize_optimization(self):\n\n if self.FLAGS.optimizer == \"Adam\" :\n self.solver = tf.train.AdamOptimizer(\n learning_rate = self.learning_rate,\n beta1 = self.FLAGS.beta1,\n beta2 = self.FLAGS.beta2)\n else:\n print(\"ERROR: Cannot handle optimizer type {}!!!\".format(self.FLAGS.optimizer))\n raise RuntimeError\n \n # batch normalization in tensorflow requires this extra dependency\n # this is required to update the moving mean and moving variance variables\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(extra_update_ops):\n self.update = self.solver.minimize(self.loss, global_step=self.global_step)", "def build_optimizer(model: nn.Module, args: Namespace) -> Optimizer:\n params = [{'params': model.parameters(), 'lr': args.init_lr, 'weight_decay': 0}]\n\n return Adam(params)", "def compile(self, optimizer: Union[IOpContainer, Type[IGradientDescent]]):\n pass", "def _get_optimizer(self):\n raise NotImplementedError", "def set_optimizer_sgd(self):\n sgd = optimizers.SGD(lr=self.lr, momentum=self.momentum, decay=0.0, nesterov=False)\n self.optimizer = sgd", "def _inst_optimizer(self):\n optimizer = Optimizers(self.m_cfg['configs']['lr_politics']['optimizer']).value\n lr_schedule = self.m_cfg['configs']['lr_politics']['lr']\n opt = optimizer(learning_rate=lr_schedule)\n return opt", "def sgd_optim(config = None, global_step = None):\n learning_rate = config[\"learning_rate\"]\n \n train_step = tf.train.GradientDescentOptimizer(learning_rate)\n #train_step = tf.train.GradientDescentOptimizer(learning_rate)\n return train_step", "def init_optimizer(self, state_dict=None, use_gpu=True):\n if self.args.fix_embeddings:\n self.network.embedder.src_word_embeddings.fix_word_lut()\n self.network.embedder.tgt_word_embeddings.fix_word_lut()\n\n if self.args.optimizer == 'sgd':\n parameters = [p for p in self.network.parameters() if p.requires_grad]\n self.optimizer = optim.SGD(parameters,\n self.args.learning_rate,\n momentum=self.args.momentum,\n weight_decay=self.args.weight_decay)\n\n elif self.args.optimizer == 'adam':\n parameters = [p for p in self.network.parameters() if p.requires_grad]\n self.optimizer = optim.Adam(parameters,\n self.args.learning_rate,\n weight_decay=self.args.weight_decay)\n elif self.args.optimizer == 'adamW':\n parameters = [p for p in self.network.parameters() if p.requires_grad]\n self.optimizer = optim.AdamW(parameters,\n self.args.learning_rate,\n weight_decay=self.args.weight_decay)\n\n else:\n raise RuntimeError('Unsupported optimizer: %s' % self.args.optimizer)\n\n if state_dict is not None:\n self.optimizer.load_state_dict(state_dict)\n # FIXME: temp soln - https://github.com/pytorch/pytorch/issues/2830\n if use_gpu:\n for state in self.optimizer.state.values():\n for k, v in state.items():\n if torch.is_tensor(v):\n state[k] = v.cuda()", "def _init_optimizer(self, optimizer):\n if optimizer == \"rmsprop\":\n self.optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06)\n elif optimizer == \"adagrad\":\n self.optimizer = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-06)\n elif optimizer == \"adadelta\":\n self.optimizer = 
keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06)\n elif optimizer == \"adam\":\n self.optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n elif optimizer == \"adamax\":\n self.optimizer = keras.optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08) \n elif hasattr(optimizer, __call__):\n self.optimizer = optimizer\n else:\n print \"Error: unsupported optimizer %s\"%optimizer\n sys.exit(0)", "def _init_optimizer(self, optimizer):\n if optimizer == \"rmsprop\":\n self.optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06)\n elif optimizer == \"adagrad\":\n self.optimizer = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-06)\n elif optimizer == \"adadelta\":\n self.optimizer = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06)\n elif optimizer == \"adam\":\n self.optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n elif optimizer == \"adamax\":\n self.optimizer = keras.optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08) \n elif hasattr(optimizer, __call__):\n self.optimizer = optimizer\n else:\n print \"Error: unsupported optimizer %s\"%optimizer\n sys.exit(0)", "def _create_optimizer(self):\n\n with tf.name_scope(\"optimizer\"):\n self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)", "def build(\n self,\n learning_rate_name: str,\n gradients_name: str,\n params_name: str,\n ) -> str:\n\n input_names = [learning_rate_name, gradients_name, params_name]\n\n return self._build_optimizer_node(\n input_names,\n _graph_utils.generate_graph_name(\"update_completed\"),\n \"SGDOptimizerV2\",\n {},\n )", "def build_optimizer(params, optimizer_type='SGD', lr=1e-3,\n weight_decay=0, momentum=0, adam_beta1=0.99,\n adam_beta2=0.99, adam_epsilon=1e-8):\n if optimizer_type == 'SGD':\n optimizer = torch.optim.SGD(params, lr=lr, momentum=momentum,\n weight_decay=weight_decay)\n elif optimizer_type == 'RMSprop':\n optimizer = torch.optim.RMSprop(params, lr=lr, momentum=momentum,\n alpha=0.95,\n eps=0.03,\n weight_decay=weight_decay,\n centered=True)\n elif optimizer_type == 'Adam':\n optimizer = torch.optim.Adam(params, lr=lr,\n betas=(adam_beta1, adam_beta2),\n eps=adam_epsilon,\n weight_decay=weight_decay)\n return optimizer", "def __init__(self, optimizer):\n super(ShardedOptimizer, self).__init__(optimizer, name=\"ShardedOptimizer\")", "def configure_optimizer(learning_rate):\n\tif train_config['optimizer'] == 'adadelta':\n\t\toptimizer = tf.train.AdadeltaOptimizer(learning_rate,\n\t\t rho=train_config['adadelta_rho'],\n\t\t epsilon=train_config['opt_epsilon'])\n\telif train_config['optimizer'] == 'dadgrad':\n\t\toptimizer = tf.train.AdagradDAOptimizer(\n\t\t\tlearning_rate,\n\t\t\tinitial_gradient_squared_accumulator_value=train_config['adagrad_initial_accumulator_value'])\n\telif train_config['optimizer'] == 'adam':\n\t\toptimizer = tf.train.AdamOptimizer(\n\t\t\tlearning_rate,\n\t\t\tbeta1=train_config['adam_beta1'],\n\t\t\tbeta2=train_config['adam_beta2'],\n\t\t\tepsilon=train_config['opt_epsilon'])\n\telif train_config['optimizer'] == 'ftrl':\n\t\toptimizer = tf.train.FtrlOptimizer(\n\t\t\tlearning_rate,\n\t\t\tlearning_rate_power=train_config['ftrl_learning_rate_power'],\n\t\t\tinitial_accumulator_value=train_config['ftrl_initial_accumulator_value'],\n\t\t\tl1_regularization_strength=train_config['ftrl_l1'],\n\t\t\tl2_regularization_strength=train_config['ftrl_l2'])\n\telif train_config['optimizer'] == 'momentum':\n\t\toptimizer = 
tf.train.MomentumOptimizer(\n\t\t\tlearning_rate,\n\t\t\tmomentum=train_config['momentum'],\n\t\t\tname='Momentum')\n\telif train_config['optimizer'] == 'rmsprop':\n\t\toptimizer = tf.train.RMSPropOptimizer(\n\t\t\tlearning_rate,\n\t\t\tdecay=train_config['rmsprop_decay'],\n\t\t\tmomentum=train_config['rmsprop_momentum'],\n\t\t\tepsilon=train_config['opt_epsilon'])\n\telif train_config['optimizer'] == 'sgd':\n\t\toptimizer = tf.train.GradientDescentOptimizer(learning_rate)\n\telse:\n\t\traise ValueError('Optimizer [%s] was not recognized' % train_config['optimizer'])\n\treturn optimizer", "def sgd_optimization(dataset, learning_rate, n_epochs, batch_size):\n datasets = load_data(dataset)\n train_set_x, train_set_y = datasets[0]\n valid_set_x, valid_set_y = datasets[1]\n test_set_x, test_set_y = datasets[2]\n\n #number of minibatches\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size\n\n #build the model\n print \"... building the model\"\n\n index = T.lscalar()\n x = T.matrix('x') #data for the rasterized images\n y = T.ivector('y') # labels (int)\n\n # logistic regression Class\n classifierLR = LogisticRegression(input=x, n_in=28*28, n_out=10)\n cost = classifierLR.negative_log_likelihood(y)\n\n # test model (no updates)\n test_model = theano.function(\n inputs=[index],\n outputs=classifierLR.errors(y),\n givens={\n x: test_set_x[index * batch_size: (index + 1) * batch_size],\n y: test_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n #validate model (no updates)\n validate_model = theano.function(\n inputs=[index],\n outputs=classifierLR.errors(y),\n givens={\n x: valid_set_x[index * batch_size: (index + 1) * batch_size],\n y: valid_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n #compute the gradient of cost wrt W, b\n g_W = T.grad(cost=cost, wrt=classifierLR.W)\n g_b = T.grad(cost=cost, wrt=classifierLR.b)\n\n #updating expression\n updates = [(classifierLR.W, classifierLR.W - learning_rate * g_W),\n (classifierLR.b, classifierLR.b - learning_rate * g_b)]\n\n # Train model (theano function); updates\n train_model = theano.function(\n inputs=[index],\n outputs=cost,\n updates=updates,\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size]\n\n }\n )\n\n # Training model (early stopping with validation examples)\n print \"... 
training the model\"\n patience = 5000\n patience_inc = 2 # wait this much\n improved_threshold = 0.995 # relative improvement (significant)\n validation_frequency = min(n_train_batches, patience / 2)\n best_validation_loss = numpy.inf\n test_score = 0.\n start_time = timeit.default_timer()\n\n done_looping = False\n epoch = 0\n while (epoch < n_epochs) and (not done_looping):\n epoch += 1\n for minibatch_index in xrange(n_train_batches):\n minibatch_avg_cost = train_model(minibatch_index)\n iter = (epoch - 1) * n_train_batches + minibatch_index\n\n if (iter + 1) % validation_frequency == 0:\n # compute loss on validation set\n validation_losses = [validate_model(i) for i in xrange(n_valid_batches)]\n this_validation_loss = numpy.mean(validation_losses)\n\n print(\n \"Epoch: %i, minibatch: %i/%i, validation_error: %f %%\" %\n (\n epoch,\n minibatch_index + 1,\n n_train_batches,\n this_validation_loss * 100.\n )\n )\n\n if this_validation_loss < best_validation_loss:\n #improve patience if good improvement\n if this_validation_loss < best_validation_loss * improved_threshold:\n patience = max(patience, iter * patience_inc)\n\n best_validation_loss = this_validation_loss\n\n #testing on test_set\n test_losses = [test_model(i) for i in xrange(n_test_batches)]\n test_score = numpy.mean(test_losses)\n\n print(\n (\n \"Epoch : %i, minibatch %i/%i,\"\n \" test error of best model %f %%\"\n ) % (\n epoch,\n minibatch_index,\n n_train_batches,\n test_score * 100.\n )\n )\n\n #save the best model\n print \"New best model found; saving ...\"\n with open('best_model.pkl', \"w\") as f:\n cPickle.dump(classifierLR, f)\n\n if patience <= iter:\n done_looping = True\n break\n\n\n end_time = timeit.default_timer()\n print(\n (\n \"Optimization Complete: best validation score : %f %%,\"\n \" test performance : %f %%\"\n )\n % (best_validation_loss * 100., test_score * 100.)\n )\n print \"The code run for %d epochs, with %f epochs/sec\" %(epoch, 1. 
* epoch / (end_time - start_time))\n print >> sys.stderr, (\"The code for file \" + os.path.split(__file__)[1] + \" ran for %.1fs\" % ((end_time - start_time)))", "def dist_optimizer(config, optimizer):\n build_strategy, exec_strategy = create_strategy(config)\n\n dist_strategy = DistributedStrategy()\n dist_strategy.execution_strategy = exec_strategy\n dist_strategy.build_strategy = build_strategy\n\n dist_strategy.nccl_comm_num = 1\n dist_strategy.fuse_all_reduce_ops = True\n dist_strategy.fuse_grad_size_in_MB = 16\n optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)\n\n return optimizer", "def add_optimizer(self):\n \n with tf.variable_scope(\"optimizer\"):\n\n # Define optimizer and minimize loss\n if self.OPTIM == \"RMSProp\":\n self.optimizer = tf.train.RMSPropOptimizer(self.LEARN_RATE).\\\n minimize(self.cost)\n \n elif self.OPTIM == \"GD\":\n self.optimizer = tf.train.GradientDescentOptimizer(self.LEARN_RATE).\\\n minimize(self.cost)\n \n elif self.OPTIM == \"Adam\":\n self.optimizer = tf.train.AdamOptimizer(self.LEARN_RATE).\\\n minimize(self.cost)\n\n # Merge all summaries for tensorboard\n #self.tbsummaries = tf.summary.merge_all()", "def optimizer_setup(model, params):\n if params.optimizer == 'adam':\n if params.freeze_backbone:\n optimizer = optimizer_handler.layer_specific_adam(model, params)\n else:\n optimizer = optimizer_handler.plain_adam(model, params)\n elif params.optimizer == 'sgd':\n if params.freeze_backbone:\n optimizer = optimizer_handler.layer_specific_sgd(model, params)\n else:\n optimizer = optimizer_handler.plain_sgd(model, params)\n\n if params.zero_bn_bias_decay:\n optimizer = zero_wdcay_bn_bias(optimizer)\n\n return optimizer" ]
[ "0.6707388", "0.65863216", "0.6496739", "0.64689183", "0.64221466", "0.6369505", "0.63093096", "0.6238498", "0.6227908", "0.6220927", "0.6189515", "0.6155221", "0.6149654", "0.61448663", "0.6075913", "0.60669714", "0.6042588", "0.60317975", "0.59488785", "0.59424376", "0.59424376", "0.5937445", "0.589208", "0.58878976", "0.5882037", "0.58775437", "0.58614916", "0.58545", "0.5853522", "0.58485234" ]
0.6680733
1
Build loss function, default to `mse`.
def loss(self): return 'mse'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loss_fn(self, targets, outputs, model):", "def make_loss(self, logit=None, labels=None):\r\n return nn.functional.mse_loss(logit, labels, reduction='mean') # The MSE Loss\r", "def _build_loss(self, **kwargs):\n pass", "def build_loss(self):\n\n opt = tf.train.AdamOptimizer(self.learning_rate)\n mse = tf.losses.mean_squared_error(self.label[-1], self.outputs[-1])\n loss = tf.losses.get_total_loss()\n\n return mse, loss", "def get_loss_fn():\n return reconstruction", "def loss_fn(self, recons, inputs, mu, log_var, **kwargs):\n# kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset\n recons_loss = F.mse_loss(recons, inputs)\n# recons_loss = F.binary_cross_entropy(recons, inputs)\n KLD = torch.mean(-0.5 * torch.sum(1 + log_var - mu**2 - log_var.exp(), dim=1), dim=0)\n loss = recons_loss - KLD\n return loss, recons_loss, KLD", "def build_loss(self):\n if self.mode != \"encode\":\n total_loss = tf.losses.get_total_loss()\n tf.summary.scalar(\"losses/total\", total_loss)\n\n self.total_loss = total_loss", "def _initLoss(self):\n\n return torch.nn.MSELoss()", "def mse_loss1(y_true,y_pred):\n return ((tf.keras.losses.MSE(tf.expand_dims(y_true, axis=0),tf.expand_dims(y_pred, axis=0))))", "def define_loss(name_loss):\n call_dict = {\n \"pixel_weighted_cross_entropy\": pixel_weighted_cross_entropy,\n \"MeanSquaredLogarithmicError\": tf.keras.losses.MeanSquaredLogarithmicError(),\n \"MeanAbsolutePercentageError\": tf.keras.losses.MeanAbsolutePercentageError(),\n \"MeanSquaredError\": tf.keras.losses.MeanSquaredError(),\n \"MeanAbsoluteError\": tf.keras.losses.MeanAbsoluteError(), \n }\n loss = call_dict[name_loss]\n return loss", "def vae_loss_function_factory(reduction='mean'):\n def vae_loss_function(outputs, targets, mean, std_dev):\n outputs_flat = outputs.view(-1, 28 * 28)\n targets_flat = targets.view(-1, 28 * 28)\n if reduction == 'mean':\n image_loss = torch.mean((outputs_flat - targets_flat).pow(2).sum(dim=1))\n latent_loss = -0.5 * torch.mean((1 + 2 * std_dev - mean.pow(2) - torch.exp(2 * std_dev)).sum(dim=1))\n elif reduction == 'sum':\n image_loss = torch.sum((outputs_flat - targets_flat).pow(2).sum(dim=1))\n latent_loss = -0.5 * torch.sum((1 + 2 * std_dev - mean.pow(2) - torch.exp(2 * std_dev)).sum(dim=1))\n elif reduction == 'none':\n image_loss = (outputs_flat - targets_flat).pow(2).sum(dim=1)\n latent_loss = -0.5 * (1 + 2 * std_dev - mean.pow(2) - torch.exp(2 * std_dev)).sum(dim=1)\n else:\n raise NotImplementedError('Reduction ' + reduction + ' not implemented.')\n return image_loss + latent_loss\n return vae_loss_function", "def loss(self, **kwargs):\n pass", "def loss_fn(model):\n with flax.deprecated.nn.stateful() as state:\n with flax.deprecated.nn.stochastic(dropout_rng):\n logits = model(example, train=True)\n loss, weight_sum = compute_weighted_cross_entropy(logits, targets)\n mean_loss = loss / weight_sum\n return mean_loss, (logits, state)", "def compute_loss(self, features, mode, params, precomputed):\n raise NotImplementedError(\"Model does not implement loss.\")", "def _compute_loss(self, predictions, targets, **params):\n pass", "def mse_loss(model: tf.keras.Model,\n model_input: tf.Tensor,\n model_target: tf.Tensor\n ):\n y_ = model(model_input)\n _reduction_string = \"weighted_sum_over_batch_size\"\n return tf.losses.mean_squared_error(labels=model_target,\n predictions=y_,\n reduction=_reduction_string\n )", "def add_loss_op(self, pred):\n ### YOUR CODE HERE\n loss = cross_entropy_loss(self.labels_placeholder,pred)\n ### END YOUR CODE\n 
return loss", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n y_pred = self.run(xs)\n return nn.SoftmaxLoss(y_pred,y)", "def compute_loss(self, **kwargs):\n raise NotImplementedError", "def loss_fn(input_d, reconstructed, mean, logvar, beta=1, batch_size=1, input_size=1):\n\n # mse_criterion = nn.MSELoss() # reduction=sum ?\n # mse_loss = mse_criterion(input_d, reconstructed)\n\n # bce_criterion = nn.BCELoss(size_average=False) # reduction=sum ?\n bce_criterion = nn.BCELoss() # reduction=sum ?\n bce_loss = bce_criterion(input_d, reconstructed)\n\n # see Appendix B from VAE paper:\n # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014\n # https://arxiv.org/abs/1312.6114\n # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n\n # for gaussian distribution when\n # generated data passed to the encorder is z~ N(0,1) and generated data is x~N(m,var)\n\n kl_loss = -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp())\n\n normalized_kl_loss = kl_loss / (batch_size * input_size)\n scaled_kl_loss = beta*normalized_kl_loss\n # scaled_kl_loss = beta*kl_loss\n\n # return bce_loss + kl_loss, bce_loss, kl_loss\n return bce_loss + scaled_kl_loss, bce_loss, normalized_kl_loss\n # return mse_loss + scaled_kl_loss, mse_loss, kl_loss", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(xs)\n return nn.SoftmaxLoss(predictedY, y)\n # return nn.SquareLoss(predictedY, y)", "def loss_total(self):\r\n def loss(y_true, y_pred):\r\n l2 = 1/2*K.sum(K.square(y_true-y_pred))\r\n\r\n return l2\r\n return loss", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n y_pred = self.run(x)\n return nn.SoftmaxLoss(y_pred,y)", "def sgd_mse_optimizer(model, config):\n learning_rate = config.get(\"lr\", 0.01)\n criterion = nn.MSELoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\n return criterion, optimizer", "def get_loss_funcs():\n\n def _eucl_loss(x, y):\n return K.sum(K.square(x - y)) / batch_size / 2\n\n losses = {}\n losses[\"weight_stage1_L1\"] = _eucl_loss\n losses[\"weight_stage1_L2\"] = _eucl_loss\n losses[\"weight_stage2_L1\"] = _eucl_loss\n losses[\"weight_stage2_L2\"] = _eucl_loss\n losses[\"weight_stage3_L1\"] = _eucl_loss\n losses[\"weight_stage3_L2\"] = _eucl_loss\n losses[\"weight_stage4_L1\"] = _eucl_loss\n losses[\"weight_stage4_L2\"] = _eucl_loss\n losses[\"weight_stage5_L1\"] = _eucl_loss\n losses[\"weight_stage5_L2\"] = _eucl_loss\n losses[\"weight_stage6_L1\"] = _eucl_loss\n losses[\"weight_stage6_L2\"] = _eucl_loss\n\n return losses", "def configure_loss_fn(self) -> nn.Module:\n pass", "def _ragged_tensor_mse(y_true, y_pred):\n return _ragged_tensor_apply_loss(mean_squared_error, y_true, y_pred)", "def mloss(_lambda):\n\n def loss(y_true, y_pred):\n return _lambda * qloss(y_true, y_pred) + (1 - _lambda) * score(y_true, y_pred)\n\n return loss", "def compute_loss(y, tx, w):\n # ***************************************************\n # INSERT YOUR CODE HERE\n # TODO: compute loss by MSE / MAE\n # ***************************************************\n \n # vector e\n e = compute_e(y, tx, w)\n N = compute_N(e)\n L_MSE = np.dot(np.matrix.transpose(e), e)\n L_MSE = L_MSE / (2 * N)\n \n return L_MSE", "def build_loss(self, n_loss, t_loss):\n loss = tf.add(n_loss, t_loss)\n return loss" ]
[ "0.69963366", "0.67683053", "0.67106676", "0.6673715", "0.651554", "0.64021844", "0.639622", "0.63460386", "0.63432056", "0.6308764", "0.63011605", "0.626922", "0.6213675", "0.6199582", "0.61774623", "0.6169041", "0.6168795", "0.61373484", "0.6129343", "0.6110672", "0.61100423", "0.61091244", "0.6108791", "0.6097593", "0.60934544", "0.60879046", "0.6083328", "0.607678", "0.60676014", "0.6062437" ]
0.68811774
1