Code listing extracted from a function/docstring dataset (columns: body, docstring, path, name, repository_name, lang). Each entry below is shown once, preceded by its metadata as path | name | repository_name | lang.
# complexidade_textual.py | remover_acentos | lflage/complexidade_textual | python
def remover_acentos(text):
    """Removes the accents from the string ``text``. Used only in the
    pre_process function."""
    return normalize('NFKD', text).encode('ASCII', 'ignore').decode('ASCII')
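A standalone demo of the idiom (NFKD decomposition, then an ASCII round-trip that drops the combining accents); only the standard-library import is assumed:

import unicodedata

def strip_accents(text):
    # NFKD splits 'é' into 'e' + a combining accent; the ASCII encode drops the accent.
    return unicodedata.normalize('NFKD', text).encode('ASCII', 'ignore').decode('ASCII')

print(strip_accents('não é difícil'))  # -> 'nao e dificil'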
# complexidade_textual.py | pre_process | lflage/complexidade_textual | python
def pre_process(text):
    """Pre-processes the input string ``text``: strips extra whitespace and
    removes non-alphanumeric characters."""
    text = re.sub(r'\s{2,}', ' ', text).strip().lower()
    doc = nlp(text)
    text = ' '.join(token.text for token in doc
                    if token.is_alpha and token.pos_ != 'PUNCT')
    return remover_acentos(text)
# complexidade_textual.py | bi_trigram_counter | lflage/complexidade_textual | python
def bi_trigram_counter(sentence_list):
    """Returns a tuple with the number of bigrams and trigrams. Takes as
    input the text segmented into a list of sentences."""
    bi_sent_list = []
    tri_sent_list = []
    for sentence in sentence_list:
        proc_sent = pre_process(sentence).lower().split()
        bi_sent_list.append(bigram_model[proc_sent])
    for bi_sent in bi_sent_list:
        tri_sent_list.append(trigram_model[bi_sent])
    return (bigram_number(bi_sent_list), trigram_number(tri_sent_list))
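``bigram_model`` and ``trigram_model`` are defined elsewhere in the repository; indexing them with a token list matches gensim's Phrases/Phraser API, which joins detected collocations with underscores. A minimal sketch under that assumption (toy corpus; which merges fire depends on the scoring threshold):

from gensim.models.phrases import Phrases, Phraser

corpus = [['new', 'york', 'is', 'big']] * 10 + [['the', 'city', 'is', 'big']]
bigram_model = Phraser(Phrases(corpus, min_count=1, threshold=0.1))
print(bigram_model[['new', 'york', 'city']])  # e.g. ['new_york', 'city']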
# complexidade_textual.py | bigram_number | lflage/complexidade_textual | python
def bigram_number(bigram_sent_list):
    """Counts the number of bigrams found in the essay. Takes the list of
    sentences that make up the essay."""
    count = 0
    for sent in bigram_sent_list:
        for token in sent:
            # Merged collocations carry an underscore, e.g. 'sao_paulo'.
            if '_' in token:
                count += 1
    return count
# complexidade_textual.py | trigram_number | lflage/complexidade_textual | python
def trigram_number(trigram_sent_list):
    """Counts the number of trigrams found in the essay. Takes the list of
    sentences that make up the essay."""
    count = 0
    for sent in trigram_sent_list:
        for token in sent:
            # A merged trigram token has a second underscore after the first.
            if re.search('(?<=_).+_', token):
                count += 1
    return count
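The two counters distinguish merged tokens by underscore count: one underscore marks a bigram, two mark a trigram, and the lookbehind pattern only matches when an underscore is followed by at least one more. A self-contained check:

import re

tokens = ['bonito', 'sao_paulo', 'rio_de_janeiro']
print(['_' in t for t in tokens])                         # [False, True, True]
print([bool(re.search('(?<=_).+_', t)) for t in tokens])  # [False, False, True]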
# complexidade_textual.py | n_most_freq_pos_tag_seq | lflage/complexidade_textual | python
def n_most_freq_pos_tag_seq(sent_list):
    """Searches the list of sentences for the most frequent POS-tag
    sequences and returns how many were found."""
    n = 0
    pos_list = []
    for sent in sent_list:
        pos_list.append([token.pos_ for token in nlp(sent)])
    for line in pos_list:
        # Slide a window of seven tags over each sentence.
        while len(line) >= 7:
            if tuple(line[0:7]) in freq_pos_tag:
                n += 1
            line.pop(0)
    return n
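``freq_pos_tag`` is defined elsewhere and is assumed to be a collection of 7-tuples; the loop above is a width-7 sliding window that pops the head after each comparison. The same window enumeration, standalone:

tags = ['DET', 'NOUN', 'VERB', 'DET', 'ADJ', 'NOUN', 'PUNCT', 'CCONJ']
windows = []
while len(tags) >= 7:
    windows.append(tuple(tags[:7]))
    tags.pop(0)
print(windows)  # two 7-tuples for an 8-tag sentence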
# complexidade_textual.py | subj_n_elements | lflage/complexidade_textual | python
def subj_n_elements(sentence_list):
    """Takes the essay's list of sentences. Counts the number of elements
    below the subject in the syntactic tree produced by spaCy's dependency
    parser. Returns the number of subjects that have seven or more elements
    and the total number of elements that belong to a subject across the
    whole essay."""
    r_list = []
    for spacy_doc in nlp.pipe(sentence_list):
        big_subj = 0
        subj_el_total = 0
        for token in spacy_doc:
            if token.dep_ == 'nsubj':
                size = len([desc for desc in token.subtree if desc.is_alpha])
                if size >= 7:
                    big_subj += 1
                subj_el_total += size
        r_list.append((big_subj, subj_el_total))
    return tuple(sum(i) for i in zip(*r_list))
# mmdet/models/roi_heads/bbox_heads/bbox_head_separate.py | _add_conv_fc_branch | Qianna00/mmdetection | python
def _add_conv_fc_branch(self, num_branch_convs, num_branch_fcs, in_channels,
                        is_shared=False):
    """Add shared or separable branch

    convs -> avg pool (optional) -> fcs
    """
    last_layer_dim = in_channels
    # Add branch-specific conv layers.
    branch_convs = nn.ModuleList()
    if num_branch_convs > 0:
        for i in range(num_branch_convs):
            conv_in_channels = (last_layer_dim if i == 0
                                else self.conv_out_channels)
            branch_convs.append(
                ConvModule(conv_in_channels, self.conv_out_channels, 3,
                           padding=1, conv_cfg=self.conv_cfg,
                           norm_cfg=self.norm_cfg))
        last_layer_dim = self.conv_out_channels
    # Add branch-specific fc layers.
    branch_fcs = nn.ModuleList()
    if num_branch_fcs > 0:
        # Without average pooling the RoI feature map is flattened before
        # the first fc, so its spatial area multiplies the input dimension.
        if (is_shared or self.num_shared_fcs == 0) and not self.with_avg_pool:
            last_layer_dim *= self.roi_feat_area
        for i in range(num_branch_fcs):
            fc_in_channels = (last_layer_dim if i == 0
                              else self.fc_out_channels)
            branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
        last_layer_dim = self.fc_out_channels
    return (branch_convs, branch_fcs, last_layer_dim)
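A quick sanity check of the flattening step, using typical (assumed, not taken from this file) mmdetection config values of a 7x7 RoI feature map and 256 conv channels:

conv_out_channels = 256   # assumed typical config value
roi_feat_area = 7 * 7     # assumed 7x7 RoI feature map
print(conv_out_channels * roi_feat_area)  # 12544 inputs to the first nn.Linear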
# app/recipe/tests/test_recipe_api.py | image_upload_url | Ahmed-Gemmy/recipe-app-api | python
def image_upload_url(recipe_id):
    """Return URL for recipe image upload"""
    return reverse('recipe:recipe-upload-image', args=[recipe_id])

# app/recipe/tests/test_recipe_api.py | detail_url | Ahmed-Gemmy/recipe-app-api | python
def detail_url(recipe_id):
    """Return recipe detail URL"""
    return reverse('recipe:recipe-detail', args=[recipe_id])

# app/recipe/tests/test_recipe_api.py | sample_tag | Ahmed-Gemmy/recipe-app-api | python
def sample_tag(user, name='Main Course'):
    """Create and return a sample tag"""
    return Tag.objects.create(user=user, name=name)

# app/recipe/tests/test_recipe_api.py | sample_ingredient | Ahmed-Gemmy/recipe-app-api | python
def sample_ingredient(user, name='Cinnamon'):
    """Create and return a sample ingredient"""
    return Ingredient.objects.create(user=user, name=name)

# app/recipe/tests/test_recipe_api.py | sample_recipe | Ahmed-Gemmy/recipe-app-api | python
def sample_recipe(user, **params):
    """Create and return a sample recipe"""
    defaults = {'title': 'Sample Recipe', 'time_minutes': 10, 'price': 5.0}
    defaults.update(params)
    return Recipe.objects.create(user=user, **defaults)
# app/recipe/tests/test_recipe_api.py | test_auth_required | Ahmed-Gemmy/recipe-app-api | python
def test_auth_required(self):
    """Test that authentication is required"""
    res = self.client.get(RECIPE_URL)
    self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)

# app/recipe/tests/test_recipe_api.py | test_retrieve_recipe | Ahmed-Gemmy/recipe-app-api | python
def test_retrieve_recipe(self):
    """Test retrieving a list of recipes"""
    sample_recipe(user=self.user)
    sample_recipe(user=self.user)
    res = self.client.get(RECIPE_URL)
    recipes = Recipe.objects.all().order_by('-id')
    serializer = RecipeSerializer(recipes, many=True)
    self.assertEqual(res.status_code, status.HTTP_200_OK)
    self.assertEqual(res.data, serializer.data)

# app/recipe/tests/test_recipe_api.py | test_recipes_limited_to_user | Ahmed-Gemmy/recipe-app-api | python
def test_recipes_limited_to_user(self):
    """Test retrieving recipes for the authenticated user only"""
    user2 = get_user_model().objects.create_user('[email protected]', 'otherpass')
    sample_recipe(user=user2)
    sample_recipe(user=self.user)
    res = self.client.get(RECIPE_URL)
    recipes = Recipe.objects.filter(user=self.user)
    serializer = RecipeSerializer(recipes, many=True)
    self.assertEqual(res.status_code, status.HTTP_200_OK)
    self.assertEqual(len(res.data), 1)
    self.assertEqual(res.data, serializer.data)

# app/recipe/tests/test_recipe_api.py | test_view_recipe_detail | Ahmed-Gemmy/recipe-app-api | python
def test_view_recipe_detail(self):
    """Test viewing a recipe's details"""
    recipe = sample_recipe(user=self.user)
    recipe.tags.add(sample_tag(user=self.user))
    recipe.ingredients.add(sample_ingredient(user=self.user))
    url = detail_url(recipe.id)
    res = self.client.get(url)
    serializer = RecipeDetailSerializer(recipe)
    self.assertEqual(res.status_code, status.HTTP_200_OK)
    self.assertEqual(res.data, serializer.data)

# app/recipe/tests/test_recipe_api.py | test_create_basic_recipe | Ahmed-Gemmy/recipe-app-api | python
def test_create_basic_recipe(self):
    """Test creating a recipe"""
    payload = {'title': 'Chocolate cheesecake', 'time_minutes': 30, 'price': 5.0}
    res = self.client.post(RECIPE_URL, payload)
    self.assertEqual(res.status_code, status.HTTP_201_CREATED)
    recipe = Recipe.objects.get(id=res.data['id'])
    for key in payload.keys():
        self.assertEqual(payload[key], getattr(recipe, key))
# app/recipe/tests/test_recipe_api.py | test_create_recipe_with_tags | Ahmed-Gemmy/recipe-app-api | python
def test_create_recipe_with_tags(self):
    """Test creating a recipe with tags"""
    tag1 = sample_tag(user=self.user, name='Vegan')
    tag2 = sample_tag(user=self.user, name='Dessert')
    payload = {'title': 'Avocado lime cheesecake', 'tags': [tag1.id, tag2.id],
               'time_minutes': 60, 'price': 20.0}
    res = self.client.post(RECIPE_URL, payload)
    self.assertEqual(res.status_code, status.HTTP_201_CREATED)
    recipe = Recipe.objects.get(id=res.data['id'])
    tags = recipe.tags.all()
    self.assertEqual(tags.count(), 2)
    self.assertIn(tag1, tags)
    self.assertIn(tag2, tags)

# app/recipe/tests/test_recipe_api.py | test_create_recipe_with_ingredients | Ahmed-Gemmy/recipe-app-api | python
def test_create_recipe_with_ingredients(self):
    """Test creating a recipe with ingredients"""
    ingredient1 = sample_ingredient(user=self.user, name='Prawns')
    ingredient2 = sample_ingredient(user=self.user, name='Ginger')
    payload = {'title': 'Thai prawn red curry',
               'ingredients': [ingredient1.id, ingredient2.id],
               'time_minutes': 20, 'price': 7.0}
    res = self.client.post(RECIPE_URL, payload)
    self.assertEqual(res.status_code, status.HTTP_201_CREATED)
    recipe = Recipe.objects.get(id=res.data['id'])
    ingredients = recipe.ingredients.all()
    self.assertEqual(ingredients.count(), 2)
    self.assertIn(ingredient1, ingredients)
    self.assertIn(ingredient2, ingredients)

# app/recipe/tests/test_recipe_api.py | test_partial_update_recipe | Ahmed-Gemmy/recipe-app-api | python
def test_partial_update_recipe(self):
    """Test updating a recipe with PATCH"""
    recipe = sample_recipe(user=self.user)
    recipe.tags.add(sample_tag(user=self.user))
    new_tag = sample_tag(user=self.user, name='Curry')
    payload = {'title': 'Chicken tikka', 'tags': [new_tag.id]}
    url = detail_url(recipe.id)
    self.client.patch(url, payload)
    recipe.refresh_from_db()
    self.assertEqual(recipe.title, payload['title'])
    tags = recipe.tags.all()
    self.assertEqual(len(tags), 1)
    self.assertIn(new_tag, tags)

# app/recipe/tests/test_recipe_api.py | test_full_update_recipe | Ahmed-Gemmy/recipe-app-api | python
def test_full_update_recipe(self):
    """Test updating a recipe with PUT"""
    recipe = sample_recipe(user=self.user)
    recipe.tags.add(sample_tag(user=self.user))
    payload = {'title': 'Spaghetti Carbonara', 'time_minutes': 25, 'price': 5.0}
    url = detail_url(recipe.id)
    self.client.put(url, payload)
    recipe.refresh_from_db()
    self.assertEqual(recipe.title, payload['title'])
    self.assertEqual(recipe.time_minutes, payload['time_minutes'])
    self.assertEqual(recipe.price, payload['price'])
    tags = recipe.tags.all()
    self.assertEqual(len(tags), 0)

# app/recipe/tests/test_recipe_api.py | test_upload_image_to_recipe | Ahmed-Gemmy/recipe-app-api | python
def test_upload_image_to_recipe(self):
    """Test uploading an image to a recipe"""
    url = image_upload_url(self.recipe.id)
    with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
        img = Image.new('RGB', (10, 10))
        img.save(ntf, format='JPEG')
        ntf.seek(0)
        res = self.client.post(url, {'image': ntf}, format='multipart')
    self.recipe.refresh_from_db()
    self.assertEqual(res.status_code, status.HTTP_200_OK)
    self.assertIn('image', res.data)
    self.assertTrue(os.path.exists(self.recipe.image.path))
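The temp-file dance above just manufactures a small but valid JPEG for the multipart upload; the same trick works with an in-memory buffer (standard-library ``io`` plus Pillow, both assumed available):

import io
from PIL import Image

buf = io.BytesIO()
Image.new('RGB', (10, 10)).save(buf, format='JPEG')
buf.seek(0)
assert buf.getvalue().startswith(b'\xff\xd8')  # JPEG magic bytes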
# app/recipe/tests/test_recipe_api.py | test_upload_image_bad_request | Ahmed-Gemmy/recipe-app-api | python
def test_upload_image_bad_request(self):
    """Test uploading an invalid image"""
    url = image_upload_url(self.recipe.id)
    res = self.client.post(url, {'image': 'notimage'}, format='multipart')
    self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

# app/recipe/tests/test_recipe_api.py | test_filter_recipes_by_tags | Ahmed-Gemmy/recipe-app-api | python
def test_filter_recipes_by_tags(self):
    """Test returning recipes with specific tags"""
    recipe1 = sample_recipe(user=self.user, title='Thai vegetables curry')
    recipe2 = sample_recipe(user=self.user, title='Aubergine with tahini')
    tag1 = sample_tag(user=self.user, name='Vegan')
    tag2 = sample_tag(user=self.user, name='Vegetarian')
    recipe1.tags.add(tag1)
    recipe2.tags.add(tag2)
    recipe3 = sample_recipe(user=self.user, title='Fish and chips')
    res = self.client.get(RECIPE_URL, {'tags': f'{tag1.id}, {tag2.id}'})
    serializer1 = RecipeSerializer(recipe1)
    serializer2 = RecipeSerializer(recipe2)
    serializer3 = RecipeSerializer(recipe3)
    self.assertIn(serializer1.data, res.data)
    self.assertIn(serializer2.data, res.data)
    self.assertNotIn(serializer3.data, res.data)

# app/recipe/tests/test_recipe_api.py | test_filter_recipes_by_ingredients | Ahmed-Gemmy/recipe-app-api | python
def test_filter_recipes_by_ingredients(self):
    """Test returning recipes with specific ingredients"""
    recipe1 = sample_recipe(user=self.user, title='Thai vegetables curry')
    recipe2 = sample_recipe(user=self.user, title='Aubergine with tahini')
    ingredient1 = sample_ingredient(user=self.user, name='Vegan')
    ingredient2 = sample_ingredient(user=self.user, name='Vegetarian')
    recipe1.ingredients.add(ingredient1)
    recipe2.ingredients.add(ingredient2)
    recipe3 = sample_recipe(user=self.user, title='Fish and chips')
    res = self.client.get(RECIPE_URL,
                          {'ingredients': f'{ingredient1.id}, {ingredient2.id}'})
    serializer1 = RecipeSerializer(recipe1)
    serializer2 = RecipeSerializer(recipe2)
    serializer3 = RecipeSerializer(recipe3)
    self.assertIn(serializer1.data, res.data)
    self.assertIn(serializer2.data, res.data)
    self.assertNotIn(serializer3.data, res.data)

# isi_sdk_8_2_2/test/test_cluster_firmware_status_node.py | testClusterFirmwareStatusNode | Isilon/isilon_sdk_python | python
def testClusterFirmwareStatusNode(self):
    """Test ClusterFirmwareStatusNode"""
    pass
# rally/plugins/openstack/scenario.py | clients | LorenzoBianconi/rally | python
def clients(self, client_type, version=None):
    """Returns a python openstack client of the requested type.

    The client will be that for one of the temporary non-administrator
    users created before the benchmark launch.

    :param client_type: Client type ("nova"/"glance" etc.)
    :param version: client version ("1"/"2" etc.)

    :returns: Standard python OpenStack client instance
    """
    client = getattr(self._clients, client_type)
    return client(version) if version is not None else client()

# rally/plugins/openstack/scenario.py | admin_clients | LorenzoBianconi/rally | python
def admin_clients(self, client_type, version=None):
    """Returns a python admin openstack client of the requested type.

    :param client_type: Client type ("nova"/"glance" etc.)
    :param version: client version ("1"/"2" etc.)

    :returns: Python openstack client object
    """
    client = getattr(self._admin_clients, client_type)
    return client(version) if version is not None else client()
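A hedged usage sketch (the service names and calls are illustrative, not taken from this file): inside a scenario these helpers hand back ready-made python clients, so a benchmark can do things like

# servers = self.clients("nova").servers.list()
# images = self.admin_clients("glance", version="2").images.list()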
# test.py | test_default | jacobunna/argcomb | python
def test_default() -> None:
    """Test the ``default`` parameter of :func:`argcomb.__init__`.

    This test also serves to check the basic functionality of
    ``argcomb`` for different types of signature: normal arguments,
    keyword only arguments, positional only arguments, or any
    combination of the above.
    """

    def test_func(func: Callable[..., None], kw_only_count: int,
                  pos_only_count: int) -> None:
        """Test a given function ``f``."""
        with pytest.raises(InvalidArgumentCombination):
            func()
        if pos_only_count == 0:
            func(a=1)
            func(a=1, b=1)
            func(a=1, b=None)
            with pytest.raises(InvalidArgumentCombination):
                func(a=None, b=1)
        if kw_only_count < 2:
            func(1)
        if kw_only_count < 2 and pos_only_count < 2:
            func(1, b=1)
            func(1, b=None)
            with pytest.raises(InvalidArgumentCombination):
                func(None, b=1)
        if kw_only_count == 0:
            func(1, 1)
            func(1, None)
            with pytest.raises(InvalidArgumentCombination):
                func(None, 1)
        if pos_only_count < 2:
            with pytest.raises(InvalidArgumentCombination):
                func(b=1)

    @argcomb('a')
    def f(a: Any = None, b: Any = None) -> None:
        ...

    test_func(f, kw_only_count=0, pos_only_count=0)

    @argcomb('a')
    def g(a: Any = None, *, b: Any = None) -> None:
        ...

    test_func(g, kw_only_count=1, pos_only_count=0)

    @argcomb('a')
    def h(*, a: Any = None, b: Any = None) -> None:
        ...

    test_func(h, kw_only_count=2, pos_only_count=0)

    @argcomb('a')
    def i(a: Any = None, /, b: Any = None) -> None:
        ...

    test_func(i, kw_only_count=0, pos_only_count=1)

    @argcomb('a')
    def j(a: Any = None, b: Any = None, /) -> None:
        ...

    test_func(j, kw_only_count=0, pos_only_count=2)

    @argcomb('a')
    def k(a: Any = None, /, *, b: Any = None) -> None:
        ...

    test_func(k, kw_only_count=1, pos_only_count=1)
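For context, this is how the decorator reads in application code, using only names exercised in this test module (``argcomb``, ``Xor``, ``InvalidArgumentCombination``); the decorated function below is hypothetical:

@argcomb(Xor('json_path', 'csv_path'))
def load_table(json_path: Any = None, csv_path: Any = None) -> None:
    ...

load_table(csv_path='data.csv')           # ok: exactly one source supplied
# load_table()                            # raises InvalidArgumentCombination
# load_table(json_path='a', csv_path='b') # raises as well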
# test.py | test_argument_specs | jacobunna/argcomb | python
def test_argument_specs() -> None:
    """Test providing specifications for arguments."""

    @argcomb(a='b', c='d')
    def f(a: Any = None, b: Any = None, c: Any = None, d: Any = None) -> None:
        ...

    f()
    f(d=1)
    f(c=1, d=1)
    f(b=1)
    f(b=1, d=1)
    f(b=1, c=1, d=1)
    f(a=1, b=1)
    f(a=1, b=1, d=1)
    f(a=1, b=1, c=1, d=1)
    with pytest.raises(InvalidArgumentCombination):
        f(c=1)
    with pytest.raises(InvalidArgumentCombination):
        f(b=1, c=1)
    with pytest.raises(InvalidArgumentCombination):
        f(a=1)
    with pytest.raises(InvalidArgumentCombination):
        f(a=1, d=1)
    with pytest.raises(InvalidArgumentCombination):
        f(a=1, c=1)
    with pytest.raises(InvalidArgumentCombination):
        f(a=1, c=1, d=1)
    with pytest.raises(InvalidArgumentCombination):
        f(a=1, b=1, c=1)

# test.py | test_value_dependent_specs | jacobunna/argcomb | python
def test_value_dependent_specs() -> None:
    """Test specifications which depend on argument value."""

    @argcomb(a={1: 'b', 2: 'c', 3: 'd'})
    def f(a: Any = None, b: Any = None, c: Any = None, d: Any = None) -> None:
        ...

    f()
    f(a=1, b=4)
    f(a=2, c=5)
    f(a=3, d=6)
    f(a=1, b=4, c=5)
    f(a=1, b=4, c=5, d=6)
    f(a=1, b=4, d=6)
    f(a=2, c=5, d=6)
    f(a=4)
    f(b=4, c=5)
    f(d=6)
    with pytest.raises(InvalidArgumentCombination):
        f(a=1, c=5)
    with pytest.raises(InvalidArgumentCombination):
        f(a=1, c=5, d=6)
    with pytest.raises(InvalidArgumentCombination):
        f(a=1)
    with pytest.raises(InvalidArgumentCombination):
        f(a=2, b=3)
    with pytest.raises(InvalidArgumentCombination):
        f(a=2, d=4)
    with pytest.raises(InvalidArgumentCombination):
        f(a=3, b=3, c=4)
    with pytest.raises(InvalidArgumentCombination):
        f(a=3)
# test.py | test_and | jacobunna/argcomb | python
def test_and() -> None:
    """Test ``And`` condition."""

    @argcomb(And('a', 'b'))
    def f(a: Any = None, b: Any = None, c: Any = None) -> None:
        ...

    f(a=1, b=2)
    f(a=1, b=2, c=3)
    with pytest.raises(InvalidArgumentCombination):
        f(a=1)
    with pytest.raises(InvalidArgumentCombination):
        f(b=1)
    with pytest.raises(InvalidArgumentCombination):
        f(a=1, b=None)
    with pytest.raises(InvalidArgumentCombination):
        f(a=None, b=1)
    with pytest.raises(InvalidArgumentCombination):
        f(a=1, c=1)
    with pytest.raises(InvalidArgumentCombination):
        f(b=1, c=1)
    with pytest.raises(InvalidArgumentCombination):
        f(c=1)
    with pytest.raises(InvalidArgumentCombination):
        f()

# test.py | test_or | jacobunna/argcomb | python
def test_or() -> None:
    """Test ``Or`` condition."""

    @argcomb(Or('a', 'b'))
    def f(a: Any = None, b: Any = None) -> None:
        ...

    f(a=1)
    f(b=2)
    f(a=1, b=2)
    with pytest.raises(InvalidArgumentCombination):
        f()

# test.py | test_not | jacobunna/argcomb | python
def test_not() -> None:
    """Test ``Not`` condition."""

    @argcomb(Not('a'))
    def f(a: Any = None) -> None:
        ...

    f()
    with pytest.raises(InvalidArgumentCombination):
        f(a=1)

# test.py | test_xor | jacobunna/argcomb | python
def test_xor() -> None:
    """Test ``Xor`` condition."""

    @argcomb(Xor('a', 'b', 'c'))
    def f(a: Any = None, b: Any = None, c: Any = None) -> None:
        ...

    f(a=1)
    f(b=1)
    f(c=1)
    with pytest.raises(InvalidArgumentCombination):
        f(a=1, b=1)
    with pytest.raises(InvalidArgumentCombination):
        f(a=1, c=1)
    with pytest.raises(InvalidArgumentCombination):
        f(b=1, c=1)
    with pytest.raises(InvalidArgumentCombination):
        f(a=1, b=1, c=1)
    with pytest.raises(InvalidArgumentCombination):
        f()

# test.py | test_else | jacobunna/argcomb | python
def test_else() -> None:
    """Test ``Else`` in value dependent specifications."""

    @argcomb(a={1: 'b', Else: 'c'})
    def f(a: Any = None, b: Any = None, c: Any = None) -> None:
        ...

    f(a=2, c=1)
    with pytest.raises(InvalidArgumentCombination):
        f(a=2, b=1)
# test.py | test_nested_condition | jacobunna/argcomb | python
def test_nested_condition() -> None:
    """Test a nested condition."""

    @argcomb(Or(And('a', 'b'), And('c', 'd')))
    def f(a: Any = None, b: Any = None, c: Any = None, d: Any = None) -> None:
        ...

    f(a=1, b=1)
    f(c=1, d=1)
    f(a=1, b=1, c=1, d=1)
    with pytest.raises(InvalidArgumentCombination):
        f(a=1)
    with pytest.raises(InvalidArgumentCombination):
        f(a=1, c=1)
    with pytest.raises(InvalidArgumentCombination):
        f()

# test.py | test_argument_named_default | jacobunna/argcomb | python
def test_argument_named_default() -> None:
    """Test when an argument is named ``default``.

    This collides with a positional only argument named ``default`` in
    the ``argcomb`` signature, but as this is positional only this
    should not matter.
    """

    @argcomb(default='a')
    def f(default: Any = None, a: Any = None) -> None:
        ...

    f(a=1)
    f(default=1, a=1)
    with pytest.raises(InvalidArgumentCombination):
        f(default=1)

# test.py | test_arguments_same_name | jacobunna/argcomb | python
def test_arguments_same_name() -> None:
    """Test that a warning is emitted when a function is called with two
    identically named arguments."""

    @argcomb(a='b')
    def f(a: Any = None, /, b: Any = None, **kwargs: Any) -> None:
        ...

    with pytest.warns(UserWarning):
        f(1, 2, a=3)

# test.py | test_default_arguments | jacobunna/argcomb | python
def test_default_arguments() -> None:
    """Test that default arguments are correctly recognised when they
    are not ``None``."""

    @argcomb(a='b')
    def f(a: int = 1, b: int = 2) -> None:
        ...

    f(a=1)
    with pytest.raises(InvalidArgumentCombination):
        f(a=2, b=2)

# test.py | test_kwargs | jacobunna/argcomb | python
def test_kwargs() -> None:
    """Test functionality when signature uses ``**kwargs``."""

    @argcomb(a='b')
    def f(**kwargs: Any) -> None:
        ...

    f(a=1, b=1)
    f(b=1, c=1)
    with pytest.raises(InvalidArgumentCombination):
        f(a=1)
# test.py | test_func | jacobunna/argcomb | python
def test_func(func: Callable[..., None], kw_only_count: int,
              pos_only_count: int) -> None:
    """Test a given function ``f``."""
    with pytest.raises(InvalidArgumentCombination):
        func()
    if pos_only_count == 0:
        func(a=1)
        func(a=1, b=1)
        func(a=1, b=None)
        with pytest.raises(InvalidArgumentCombination):
            func(a=None, b=1)
    if kw_only_count < 2:
        func(1)
    if kw_only_count < 2 and pos_only_count < 2:
        func(1, b=1)
        func(1, b=None)
        with pytest.raises(InvalidArgumentCombination):
            func(None, b=1)
    if kw_only_count == 0:
        func(1, 1)
        func(1, None)
        with pytest.raises(InvalidArgumentCombination):
            func(None, 1)
    if pos_only_count < 2:
        with pytest.raises(InvalidArgumentCombination):
            func(b=1)
# google/cloud/compute_v1/services/addresses/transports/base.py | __init__ | igor-solomatov/python-compute | python
def __init__(self, *, host: str = 'compute.googleapis.com',
             credentials: credentials.Credentials = None,
             credentials_file: typing.Optional[str] = None,
             scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
             quota_project_id: typing.Optional[str] = None,
             client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
             **kwargs) -> None:
    """Instantiate the transport.

    Args:
        host (Optional[str]): The hostname to connect to.
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
        credentials_file (Optional[str]): A file with credentials that can
            be loaded with :func:`google.auth.load_credentials_from_file`.
            This argument is mutually exclusive with credentials.
        scope (Optional[Sequence[str]]): A list of scopes.
        quota_project_id (Optional[str]): An optional project to use for
            billing and quota.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, then default info will be used.
            Generally, you only need to set this if you're developing
            your own client library.
    """
    if ':' not in host:
        host += ':443'
    self._host = host
    if credentials and credentials_file:
        raise exceptions.DuplicateCredentialArgs(
            "'credentials_file' and 'credentials' are mutually exclusive")
    if credentials_file is not None:
        credentials, _ = auth.load_credentials_from_file(
            credentials_file, scopes=scopes, quota_project_id=quota_project_id)
    elif credentials is None:
        credentials, _ = auth.default(scopes=scopes,
                                      quota_project_id=quota_project_id)
    self._credentials = credentials
    self._prep_wrapped_messages(client_info)
# cogs/Message Management.py | clear | ThomScottW/DiscordBot | python
@commands.command(aliases=['c'])
async def clear(self, ctx, amount: int = 5):
    """Clear AMOUNT messages from chat. Default is 5. Also deletes
    the message that invoked this command.

    Usage: "clear <amount>"
    """
    await ctx.message.delete()
    await ctx.channel.purge(limit=amount)

# cogs/Message Management.py | clear_from_user | ThomScottW/DiscordBot | python
@commands.command(aliases=['cfu', 'clear_user'])
async def clear_from_user(self, ctx, amount: typing.Optional[int] = 5, *, username):
    """Clear AMOUNT messages from a specific user. Also deletes the message
    that invoked this command.

    Usage: "clear_from_user <amount> <username>"

    Username is the discord username, not server nickname.
    """
    msgsSearched = 0
    remaining = amount
    await ctx.message.delete()
    async for message in ctx.channel.history(limit=1000):
        msgsSearched += 1
        if message.author.name == username:
            await message.delete()
            remaining -= 1
            if remaining == 0:
                break
    else:
        # The loop finished without a break: not enough matching messages.
        await ctx.send(f'There were less than {amount} messages from {username} '
                       f'in the last {msgsSearched} messages.')

# cogs/Message Management.py | anchor_delete_manual | ThomScottW/DiscordBot | python
@commands.command(aliases=['adm'])
async def anchor_delete_manual(self, ctx):
    """Delete the messages between two anchors."""
    if ctx.channel.id in self._anchors:
        # Second invocation: delete everything back to the stored anchor.
        async for message in ctx.channel.history(limit=None):
            if message.id == self._anchors[ctx.channel.id]:
                await message.delete()
                del self._anchors[ctx.channel.id]
                break
            else:
                await message.delete()
    else:
        # First invocation: remember this message as the anchor.
        self._anchors[ctx.channel.id] = ctx.message.id
# cogs/Message Management.py | anchor_delete_choice | ThomScottW/DiscordBot | python
@commands.command(aliases=['adc'])
async def anchor_delete_choice(self, ctx, bottom: int, top: int):
    """Given the message IDs for two messages, delete all messages between them."""
    bottom_msg = await ctx.channel.fetch_message(bottom)
    top_msg = await ctx.channel.fetch_message(top)
    if not bottom_msg.created_at > top_msg.created_at:
        await ctx.send('Bottom anchor must come after top anchor.')
        return
    anchored = False
    num_deleted = 0
    async for message in ctx.channel.history(limit=None):
        if message.id == bottom:
            anchored = True
            await message.delete()
            num_deleted += 1
            continue
        if anchored:
            num_deleted += 1
            await message.delete()
            if message.id == top:
                anchored = False
                break
    await ctx.send(f'Deleted {num_deleted} messages.')
    await ctx.message.delete()
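Both anchor commands depend on ``ctx.channel.history`` yielding messages newest-first, so the bottom anchor is reached before the top one; that is discord.py's default iteration order when no ``after``/``oldest_first`` arguments are given (an assumption about the library version, not stated in this file):

# async for message in ctx.channel.history(limit=None):  # newest -> oldest
#     ...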
# astropy/modeling/powerlaws.py | evaluate | JefftheCloudDog/astropy | python
@staticmethod
def evaluate(x, amplitude, x_0, alpha):
    """One dimensional power law model function"""
    xx = x / x_0
    return amplitude * xx ** (-alpha)

# astropy/modeling/powerlaws.py | fit_deriv | JefftheCloudDog/astropy | python
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha):
    """One dimensional power law derivative with respect to parameters"""
    xx = x / x_0
    d_amplitude = xx ** (-alpha)
    d_x_0 = amplitude * alpha * d_amplitude / x_0
    d_alpha = -amplitude * d_amplitude * np.log(xx)
    return [d_amplitude, d_x_0, d_alpha]
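Written out, the model and the three partials returned by ``fit_deriv`` are:

f(x) = A \left(\tfrac{x}{x_0}\right)^{-\alpha}, \quad
\frac{\partial f}{\partial A} = \left(\tfrac{x}{x_0}\right)^{-\alpha}, \quad
\frac{\partial f}{\partial x_0} = \frac{A\,\alpha}{x_0}\left(\tfrac{x}{x_0}\right)^{-\alpha}, \quad
\frac{\partial f}{\partial \alpha} = -A\left(\tfrac{x}{x_0}\right)^{-\alpha}\ln\tfrac{x}{x_0}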
# astropy/modeling/powerlaws.py | evaluate | JefftheCloudDog/astropy | python
@staticmethod
def evaluate(x, amplitude, x_break, alpha_1, alpha_2):
    """One dimensional broken power law model function"""
    alpha = np.where(x < x_break, alpha_1, alpha_2)
    xx = x / x_break
    return amplitude * xx ** (-alpha)

# astropy/modeling/powerlaws.py | fit_deriv | JefftheCloudDog/astropy | python
@staticmethod
def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2):
    """One dimensional broken power law derivative with respect to parameters"""
    alpha = np.where(x < x_break, alpha_1, alpha_2)
    xx = x / x_break
    d_amplitude = xx ** (-alpha)
    d_x_break = amplitude * alpha * d_amplitude / x_break
    d_alpha = -amplitude * d_amplitude * np.log(xx)
    d_alpha_1 = np.where(x < x_break, d_alpha, 0)
    d_alpha_2 = np.where(x >= x_break, d_alpha, 0)
    return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2]
@staticmethod
def evaluate(x, amplitude, x_break, alpha_1, alpha_2, delta):
'One dimensional smoothly broken power law model function'
xx = (x / x_break)
f = np.zeros_like(xx, subok=False)
if isinstance(amplitude, Quantity):
return_unit = amplitude.unit
amplitude = amplitude.value
else:
return_unit = None
logt = (np.log(xx) / delta)
threshold = 30
i = (logt > threshold)
if i.max():
f[i] = ((amplitude * (xx[i] ** (- alpha_2))) / (2.0 ** ((alpha_1 - alpha_2) * delta)))
i = (logt < (- threshold))
if i.max():
f[i] = ((amplitude * (xx[i] ** (- alpha_1))) / (2.0 ** ((alpha_1 - alpha_2) * delta)))
i = (np.abs(logt) <= threshold)
if i.max():
t = np.exp(logt[i])
r = ((1.0 + t) / 2.0)
f[i] = ((amplitude * (xx[i] ** (- alpha_1))) * (r ** ((alpha_1 - alpha_2) * delta)))
if return_unit:
return Quantity(f, unit=return_unit, copy=False)
return f | -3,852,438,868,734,511,000 | One dimensional smoothly broken power law model function | astropy/modeling/powerlaws.py | evaluate | JefftheCloudDog/astropy | python | @staticmethod
def evaluate(x, amplitude, x_break, alpha_1, alpha_2, delta):
xx = (x / x_break)
f = np.zeros_like(xx, subok=False)
if isinstance(amplitude, Quantity):
return_unit = amplitude.unit
amplitude = amplitude.value
else:
return_unit = None
logt = (np.log(xx) / delta)
threshold = 30
i = (logt > threshold)
if i.max():
f[i] = ((amplitude * (xx[i] ** (- alpha_2))) / (2.0 ** ((alpha_1 - alpha_2) * delta)))
i = (logt < (- threshold))
if i.max():
f[i] = ((amplitude * (xx[i] ** (- alpha_1))) / (2.0 ** ((alpha_1 - alpha_2) * delta)))
i = (np.abs(logt) <= threshold)
if i.max():
t = np.exp(logt[i])
r = ((1.0 + t) / 2.0)
f[i] = ((amplitude * (xx[i] ** (- alpha_1))) * (r ** ((alpha_1 - alpha_2) * delta)))
if return_unit:
return Quantity(f, unit=return_unit, copy=False)
return f |
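A short illustrative sketch of why the evaluation above splits into three regimes: just past the threshold of 30 on logt, (1.0 + t) / 2.0 is already indistinguishable from t / 2.0 in float64, and much larger logt would overflow np.exp entirely, so the asymptotic power-law branches are used instead.

import numpy as np
t = np.exp(31.0)                              # logt just past the threshold
print(np.isclose((1.0 + t) / 2.0, t / 2.0))   # True: the +1 is lost to rounding
with np.errstate(over='ignore'):
    print(np.exp(800.0))                      # inf: exp overflows for large logt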
@staticmethod
def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2, delta):
'One dimensional smoothly broken power law derivative with respect\n to parameters'
xx = (x / x_break)
logt = (np.log(xx) / delta)
f = np.zeros_like(xx)
d_amplitude = np.zeros_like(xx)
d_x_break = np.zeros_like(xx)
d_alpha_1 = np.zeros_like(xx)
d_alpha_2 = np.zeros_like(xx)
d_delta = np.zeros_like(xx)
threshold = 30
i = (logt > threshold)
if i.max():
f[i] = ((amplitude * (xx[i] ** (- alpha_2))) / (2.0 ** ((alpha_1 - alpha_2) * delta)))
d_amplitude[i] = (f[i] / amplitude)
d_x_break[i] = ((f[i] * alpha_2) / x_break)
d_alpha_1[i] = (f[i] * ((- delta) * np.log(2)))
d_alpha_2[i] = (f[i] * ((- np.log(xx[i])) + (delta * np.log(2))))
d_delta[i] = (f[i] * ((- (alpha_1 - alpha_2)) * np.log(2)))
i = (logt < (- threshold))
if i.max():
f[i] = ((amplitude * (xx[i] ** (- alpha_1))) / (2.0 ** ((alpha_1 - alpha_2) * delta)))
d_amplitude[i] = (f[i] / amplitude)
d_x_break[i] = ((f[i] * alpha_1) / x_break)
d_alpha_1[i] = (f[i] * ((- np.log(xx[i])) - (delta * np.log(2))))
d_alpha_2[i] = ((f[i] * delta) * np.log(2))
d_delta[i] = (f[i] * ((- (alpha_1 - alpha_2)) * np.log(2)))
i = (np.abs(logt) <= threshold)
if i.max():
t = np.exp(logt[i])
r = ((1.0 + t) / 2.0)
f[i] = ((amplitude * (xx[i] ** (- alpha_1))) * (r ** ((alpha_1 - alpha_2) * delta)))
d_amplitude[i] = (f[i] / amplitude)
d_x_break[i] = ((f[i] * (alpha_1 - ((((alpha_1 - alpha_2) * t) / 2.0) / r))) / x_break)
d_alpha_1[i] = (f[i] * ((- np.log(xx[i])) + (delta * np.log(r))))
d_alpha_2[i] = (f[i] * ((- delta) * np.log(r)))
d_delta[i] = ((f[i] * (alpha_1 - alpha_2)) * (np.log(r) - (((t / (1.0 + t)) / delta) * np.log(xx[i]))))
return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2, d_delta] | -2,195,259,169,781,833,700 | One dimensional smoothly broken power law derivative with respect
to parameters | astropy/modeling/powerlaws.py | fit_deriv | JefftheCloudDog/astropy | python | @staticmethod
def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2, delta):
'One dimensional smoothly broken power law derivative with respect\n to parameters'
xx = (x / x_break)
logt = (np.log(xx) / delta)
f = np.zeros_like(xx)
d_amplitude = np.zeros_like(xx)
d_x_break = np.zeros_like(xx)
d_alpha_1 = np.zeros_like(xx)
d_alpha_2 = np.zeros_like(xx)
d_delta = np.zeros_like(xx)
threshold = 30
i = (logt > threshold)
if i.max():
f[i] = ((amplitude * (xx[i] ** (- alpha_2))) / (2.0 ** ((alpha_1 - alpha_2) * delta)))
d_amplitude[i] = (f[i] / amplitude)
d_x_break[i] = ((f[i] * alpha_2) / x_break)
d_alpha_1[i] = (f[i] * ((- delta) * np.log(2)))
d_alpha_2[i] = (f[i] * ((- np.log(xx[i])) + (delta * np.log(2))))
d_delta[i] = (f[i] * ((- (alpha_1 - alpha_2)) * np.log(2)))
i = (logt < (- threshold))
if i.max():
f[i] = ((amplitude * (xx[i] ** (- alpha_1))) / (2.0 ** ((alpha_1 - alpha_2) * delta)))
d_amplitude[i] = (f[i] / amplitude)
d_x_break[i] = ((f[i] * alpha_1) / x_break)
d_alpha_1[i] = (f[i] * ((- np.log(xx[i])) - (delta * np.log(2))))
d_alpha_2[i] = ((f[i] * delta) * np.log(2))
d_delta[i] = (f[i] * ((- (alpha_1 - alpha_2)) * np.log(2)))
i = (np.abs(logt) <= threshold)
if i.max():
t = np.exp(logt[i])
r = ((1.0 + t) / 2.0)
f[i] = ((amplitude * (xx[i] ** (- alpha_1))) * (r ** ((alpha_1 - alpha_2) * delta)))
d_amplitude[i] = (f[i] / amplitude)
d_x_break[i] = ((f[i] * (alpha_1 - ((((alpha_1 - alpha_2) * t) / 2.0) / r))) / x_break)
d_alpha_1[i] = (f[i] * ((- np.log(xx[i])) + (delta * np.log(r))))
d_alpha_2[i] = (f[i] * ((- delta) * np.log(r)))
d_delta[i] = ((f[i] * (alpha_1 - alpha_2)) * (np.log(r) - (((t / (1.0 + t)) / delta) * np.log(xx[i]))))
return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2, d_delta] |
@staticmethod
def evaluate(x, amplitude, x_0, alpha, x_cutoff):
'One dimensional exponential cutoff power law model function'
xx = (x / x_0)
return ((amplitude * (xx ** (- alpha))) * np.exp(((- x) / x_cutoff))) | 2,218,565,850,251,482,000 | One dimensional exponential cutoff power law model function | astropy/modeling/powerlaws.py | evaluate | JefftheCloudDog/astropy | python | @staticmethod
def evaluate(x, amplitude, x_0, alpha, x_cutoff):
xx = (x / x_0)
return ((amplitude * (xx ** (- alpha))) * np.exp(((- x) / x_cutoff))) |
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha, x_cutoff):
'One dimensional exponential cutoff power law derivative with respect to parameters'
xx = (x / x_0)
xc = (x / x_cutoff)
d_amplitude = ((xx ** (- alpha)) * np.exp((- xc)))
d_x_0 = (((alpha * amplitude) * d_amplitude) / x_0)
d_alpha = (((- amplitude) * d_amplitude) * np.log(xx))
d_x_cutoff = (((amplitude * x) * d_amplitude) / (x_cutoff ** 2))
return [d_amplitude, d_x_0, d_alpha, d_x_cutoff] | -4,989,707,162,353,163,000 | One dimensional exponential cutoff power law derivative with respect to parameters | astropy/modeling/powerlaws.py | fit_deriv | JefftheCloudDog/astropy | python | @staticmethod
def fit_deriv(x, amplitude, x_0, alpha, x_cutoff):
xx = (x / x_0)
xc = (x / x_cutoff)
d_amplitude = ((xx ** (- alpha)) * np.exp((- xc)))
d_x_0 = (((alpha * amplitude) * d_amplitude) / x_0)
d_alpha = (((- amplitude) * d_amplitude) * np.log(xx))
d_x_cutoff = (((amplitude * x) * d_amplitude) / (x_cutoff ** 2))
return [d_amplitude, d_x_0, d_alpha, d_x_cutoff] |
@staticmethod
def evaluate(x, amplitude, x_0, alpha, beta):
'One dimensional log parabola model function'
xx = (x / x_0)
exponent = ((- alpha) - (beta * np.log(xx)))
return (amplitude * (xx ** exponent)) | 1,702,451,252,748,684,300 | One dimensional log parabola model function | astropy/modeling/powerlaws.py | evaluate | JefftheCloudDog/astropy | python | @staticmethod
def evaluate(x, amplitude, x_0, alpha, beta):
xx = (x / x_0)
exponent = ((- alpha) - (beta * np.log(xx)))
return (amplitude * (xx ** exponent)) |
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha, beta):
'One dimensional log parabola derivative with respect to parameters'
xx = (x / x_0)
log_xx = np.log(xx)
exponent = ((- alpha) - (beta * log_xx))
d_amplitude = (xx ** exponent)
d_beta = (((- amplitude) * d_amplitude) * (log_xx ** 2))
d_x_0 = ((amplitude * d_amplitude) * (((beta * log_xx) / x_0) - (exponent / x_0)))
d_alpha = (((- amplitude) * d_amplitude) * log_xx)
return [d_amplitude, d_x_0, d_alpha, d_beta] | -5,864,375,728,540,962,000 | One dimensional log parabola derivative with respect to parameters | astropy/modeling/powerlaws.py | fit_deriv | JefftheCloudDog/astropy | python | @staticmethod
def fit_deriv(x, amplitude, x_0, alpha, beta):
xx = (x / x_0)
log_xx = np.log(xx)
exponent = ((- alpha) - (beta * log_xx))
d_amplitude = (xx ** exponent)
d_beta = (((- amplitude) * d_amplitude) * (log_xx ** 2))
d_x_0 = ((amplitude * d_amplitude) * (((beta * log_xx) / x_0) - (exponent / x_0)))
d_alpha = (((- amplitude) * d_amplitude) * log_xx)
return [d_amplitude, d_x_0, d_alpha, d_beta] |
@staticmethod
def evaluate(mag, phi_star, m_star, alpha):
'Schechter luminosity function model function.'
if (isinstance(mag, Quantity) or isinstance(m_star, Quantity)):
raise ValueError('mag and m_star must not have units')
factor = (10 ** (0.4 * (m_star - mag)))
return ((((0.4 * np.log(10)) * phi_star) * (factor ** (alpha + 1))) * np.exp((- factor))) | 643,891,343,457,198,600 | Schechter luminosity function model function. | astropy/modeling/powerlaws.py | evaluate | JefftheCloudDog/astropy | python | @staticmethod
def evaluate(mag, phi_star, m_star, alpha):
if (isinstance(mag, Quantity) or isinstance(m_star, Quantity)):
raise ValueError('mag and m_star must not have units')
factor = (10 ** (0.4 * (m_star - mag)))
return ((((0.4 * np.log(10)) * phi_star) * (factor ** (alpha + 1))) * np.exp((- factor))) |
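An illustrative evaluation of the Schechter form above; the parameter values are made up for the example and are not fiducial.

import numpy as np
phi_star, m_star, alpha = 4.3e-4, -20.3, -1.2   # arbitrary example values
mag = np.array([-22.0, -20.0, -18.0])
factor = 10 ** (0.4 * (m_star - mag))
phi = 0.4 * np.log(10) * phi_star * factor ** (alpha + 1) * np.exp(-factor)
# phi drops exponentially brightward of m_star and follows a power law faintward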
@staticmethod
def fit_deriv(mag, phi_star, m_star, alpha):
'\n Schechter luminosity function derivative with respect to\n parameters.\n '
if (isinstance(mag, Quantity) or isinstance(m_star, Quantity)):
raise ValueError('mag and m_star must not have units')
factor = (10 ** (0.4 * (m_star - mag)))
d_phi_star = (((0.4 * np.log(10)) * (factor ** (alpha + 1))) * np.exp((- factor)))
func = (phi_star * d_phi_star)
d_m_star = (((((alpha + 1) * 0.4) * np.log(10)) * func) - (((0.4 * np.log(10)) * func) * factor))
d_alpha = (func * np.log(factor))
return [d_phi_star, d_m_star, d_alpha] | -4,267,011,896,546,974,700 | Schechter luminosity function derivative with respect to
parameters. | astropy/modeling/powerlaws.py | fit_deriv | JefftheCloudDog/astropy | python | @staticmethod
def fit_deriv(mag, phi_star, m_star, alpha):
'\n Schechter luminosity function derivative with respect to\n parameters.\n '
if (isinstance(mag, Quantity) or isinstance(m_star, Quantity)):
raise ValueError('mag and m_star must not have units')
factor = (10 ** (0.4 * (m_star - mag)))
d_phi_star = (((0.4 * np.log(10)) * (factor ** (alpha + 1))) * np.exp((- factor)))
func = (phi_star * d_phi_star)
d_m_star = (((((alpha + 1) * 0.4) * np.log(10)) * func) - (((0.4 * np.log(10)) * func) * factor))
d_alpha = (func * np.log(factor))
return [d_phi_star, d_m_star, d_alpha] |
@click.group()
def cli():
    '\n    Serves the application for testing locally. If you want to test it\n    in a production-like environment, please deploy with Docker.\n\n    :return: Application instance\n    '
click.echo('\x1b[95mINFO: Starting the app..\x1b[0m')
app.run() | -6,949,805,830,813,337,000 | Serves the application for testing locally. If you want to test it
in a production-like environment, please deploy with Docker.
:return: Application instance | cli/commands/cmd_serve.py | cli | laith43d/JUJU-User-Example | python | @click.group()
def cli():
    '\n    Serves the application for testing locally. If you want to test it\n    in a production-like environment, please deploy with Docker.\n\n    :return: Application instance\n    '
click.echo('\x1b[95mINFO: Starting the app..\x1b[0m')
app.run() |
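Assuming the usual click entry-point wiring for this commands package (not shown in this record), the group would be invoked roughly as follows; the executable name is a placeholder.

#   $ app-cli serve
#   INFO: Starting the app..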
def __init__(self, content: str=None, expire: int=None, name: str=None, rtype: str=None):
'Initialize the DnsEntry object.\n\n Closely represent the TransIP dnsEntry object\n\n :param content: content (rdata) corresponding to the record type\n (e.g. ip), defaults to None\n :type content: str, optional\n :param expire: Time To Live (TTL) of the record, defaults to None\n :type expire: int, optional\n :param name: name of the record, defaults to None\n :type name: str, optional\n :param rtype: one of the (allowed) record types (see DNS_RECORD_TYPES),\n defaults to None\n :type rtype: str, optional\n '
self.content = content
self.expire = expire
self.name = name
self.rtype = None
self.rtype = rtype | -4,195,593,731,745,652,000 | Initialize the DnsEntry object.
Closely represent the TransIP dnsEntry object
:param content: content (rdata) corresponding to the record type
(e.g. ip), defaults to None
:type content: str, optional
:param expire: Time To Live (TTL) of the record, defaults to None
:type expire: int, optional
:param name: name of the record, defaults to None
:type name: str, optional
:param rtype: one of the (allowed) record types (see DNS_RECORD_TYPES),
defaults to None
:type rtype: str, optional | transip_dns/transip_interface.py | __init__ | bheuvel/transip_dns | python | def __init__(self, content: str=None, expire: int=None, name: str=None, rtype: str=None):
'Initialize the DnsEntry object.\n\n Closely represent the TransIP dnsEntry object\n\n :param content: content (rdata) corresponding to the record type\n (e.g. ip), defaults to None\n :type content: str, optional\n :param expire: Time To Live (TTL) of the record, defaults to None\n :type expire: int, optional\n :param name: name of the record, defaults to None\n :type name: str, optional\n :param rtype: one of the (allowed) record types (see DNS_RECORD_TYPES),\n defaults to None\n :type rtype: str, optional\n '
self.content = content
self.expire = expire
self.name = name
self.rtype = None
self.rtype = rtype |
def __repr__(self) -> str:
'Represent the TransIP definition of a dnsEntry object.\n\n The dnsEntry object is specified as a JSON object\n\n :return: JSON representation of the record according to the dnsEntry\n :rtype: str\n '
return {'dnsEntry': {'name': self.name, 'expire': self.expire, 'type': self.rtype, 'content': self.content}} | 3,583,720,378,407,534,600 | Represent the TransIP definition of a dnsEntry object.
The dnsEntry object is specified as a JSON object
:return: JSON representation of the record according to the dnsEntry
:rtype: str | transip_dns/transip_interface.py | __repr__ | bheuvel/transip_dns | python | def __repr__(self) -> str:
'Represent the TransIP definition of a dnsEntry object.\n\n The dnsEntry object is specified as a JSON object\n\n :return: JSON representation of the record according to the dnsEntry\n :rtype: str\n '
return {'dnsEntry': {'name': self.name, 'expire': self.expire, 'type': self.rtype, 'content': self.content}} |
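A quick sketch of the dnsEntry shape that __repr__ above produces; note that it returns a dict rather than a string, and the record values here are placeholders.

entry = DnsEntry(content='203.0.113.10', expire=300, name='www', rtype='A')
print(entry.__repr__())
# {'dnsEntry': {'name': 'www', 'expire': 300, 'type': 'A', 'content': '203.0.113.10'}}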
def __init__(self, name: str, rtype: str, expire: str, content: str, zone: str, query_data: Union[(str, None)]=None) -> None:
'Initialize the DnsRecord object with safety checks.\n\n :param name: name of the DNS record\n :type name: str\n :param rtype: type of the DNS record\n :type rtype: str\n :param expire: TTL of the DNS record\n :type expire: str\n :param content: content of the DNS record\n :type content: str\n :param zone: Zone or domain of the DNS record\n :type zone: str\n :param query_data: url which produces the exact data to be used as\n content, defaults to None\n :type query_data: Union[str, None], optional\n :raises ValueError: Raise an error if an invalid record type is specified\n '
if (rtype is not None):
if (not (rtype.upper() in DNS_RECORD_TYPES)):
raise ValueError(f"Type '{rtype}' is not one of the allowed record types ({DNS_RECORD_TYPES})")
super().__init__(content=content, expire=expire, name=name, rtype=rtype)
self.zone = zone
self.fqdn = f'{self.name}.{self.zone}'
if query_data:
self.content = DnsRecord.query_for_content(query_data)
logger.info(f"Resolved record data to be used: '{self.content}'")
self.record_state = None | 8,442,978,875,264,505,000 | Initialize the DnsRecord object with safety checks.
:param name: name of the DNS record
:type name: str
:param rtype: type of the DNS record
:type rtype: str
:param expire: TTL of the DNS record
:type expire: str
:param content: content of the DNS record
:type content: str
:param zone: Zone or domain of the DNS record
:type zone: str
:param query_data: url which produces the exact data to be used as
content, defaults to None
:type query_data: Union[str, None], optional
:raises ValueError: Raise an error if an invalid record type is specified | transip_dns/transip_interface.py | __init__ | bheuvel/transip_dns | python | def __init__(self, name: str, rtype: str, expire: str, content: str, zone: str, query_data: Union[(str, None)]=None) -> None:
'Initialize the DnsRecord object with safety checks.\n\n :param name: name of the DNS record\n :type name: str\n :param rtype: type of the DNS record\n :type rtype: str\n :param expire: TTL of the DNS record\n :type expire: str\n :param content: content of the DNS record\n :type content: str\n :param zone: Zone or domain of the DNS record\n :type zone: str\n :param query_data: url which produces the exact data to be used as\n content, defaults to None\n :type query_data: Union[str, None], optional\n :raises ValueError: Raise an error if an invalid record type is specified\n '
if (rtype is not None):
if (not (rtype.upper() in DNS_RECORD_TYPES)):
raise ValueError(f"Type '{rtype}' is not one of the allowed record types ({DNS_RECORD_TYPES})")
super().__init__(content=content, expire=expire, name=name, rtype=rtype)
self.zone = zone
self.fqdn = f'{self.name}.{self.zone}'
if query_data:
self.content = DnsRecord.query_for_content(query_data)
logger.info(f"Resolved record data to be used: '{self.content}'")
self.record_state = None |
@property
def dnsentry(self):
'Return the TransIP representation of the dnsEntry object.'
return super().__repr__() | -1,604,088,412,028,155,100 | Return the TransIP representation of the dnsEntry object. | transip_dns/transip_interface.py | dnsentry | bheuvel/transip_dns | python | @property
def dnsentry(self):
return super().__repr__() |
@staticmethod
def query_for_content(query_url: str) -> str:
'Retrieve the ip address from the "current" location.\n\n By default it will query for an ip (v4/v6) address,\n but may be used for other data as well\n\n :param query_url: url which produces the exact data\n to be used as content\n :type query_url: str\n :raises RequestsRaisedException: raised for connection errors with the server\n :raises Non200Response: raised when server does not respond "OK" (200)\n :return: the resolved ip address, or whatever may be\n returned by a custom provided url\n :rtype: str\n '
my_ip = None
try:
ip_query = requests.get(query_url)
except Exception as e:
raise RequestsRaisedException('Error in request for Internet ip address; ') from e
if (ip_query.status_code == 200):
my_ip = ip_query.text.strip()
else:
raise Non200Response(f'Could not resolve Internet ip address (non 200 response); {ip_query.status_code}: {ip_query.reason}')
return my_ip | 122,964,255,606,669,380 | Retrieve the ip address from the "current" location.
By default it will query for an ip (v4/v6) address,
but may be used for other data as well
:param query_url: url which produces the exact data
to be used as content
:type query_url: str
:raises RequestsRaisedException: raised for connection errors with the server
:raises Non200Response: raised when server does not respond "OK" (200)
:return: the resolved ip address, or whatever may be
returned by a custom provided url
:rtype: str | transip_dns/transip_interface.py | query_for_content | bheuvel/transip_dns | python | @staticmethod
def query_for_content(query_url: str) -> str:
'Retrieve the ip address from the "current" location.\n\n By default it will query for an ip (v4/v6) address,\n but may be used for other data as well\n\n :param query_url: url which produces the exact data\n to be used as content\n :type query_url: str\n :raises RequestsRaisedException: raised for connection errors with the server\n :raises Non200Response: raised when server does not respond "OK" (200)\n :return: the resolved ip address, or whatever may be\n returned by a custom provided url\n :rtype: str\n '
my_ip = None
try:
ip_query = requests.get(query_url)
except Exception as e:
raise RequestsRaisedException('Error in request for Internet ip address; ') from e
if (ip_query.status_code == 200):
my_ip = ip_query.text.strip()
else:
raise Non200Response(f'Could not resolve Internet ip address (non 200 response); {ip_query.status_code}: {ip_query.reason}')
return my_ip |
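Sketch of resolving the current public address through the static helper above; the query URL is an illustrative choice, not one mandated by the module.

my_ip = DnsRecord.query_for_content('https://ipv4.icanhazip.com')
print(my_ip)  # e.g. '198.51.100.7'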
def __init__(self, login: str=None, private_key_pem: str=None, private_key_pem_file: Path=None, access_token: str=None, expiration_time: int=60, read_only: bool=False, global_key: bool=False, label: str=f'{__project__} {__version__}', authentication_url: str='https://api.transip.nl/v6/auth', root_endpoint: str='https://api.transip.nl/v6', connection_timeout: int=30, retry: int=3, retry_delay: float=5):
'Initialize the interface with TransIP.\n\n :param login: the TransIP login name, defaults to None\n :type login: str, optional\n :param private_key_pem: the private key as string, defaults to None\n :type private_key_pem: str, optional\n :param private_key_pem_file: file location of the private key, defaults to None\n :type private_key_pem_file: Path, optional\n :param access_token: JSON Web Token, defaults to None\n :type access_token: str, optional\n :param expiration_time: expiration time (TTL) of the access token,\n defaults to 60\n :type expiration_time: int, optional\n :param read_only: key/token allows to change objects or only read,\n defaults to False\n :type read_only: bool, optional\n :param global_key: key may only be used from whitelisted ip addresses,\n defaults to False\n :type global_key: bool, optional\n :param label: textual identifier for the access token,\n defaults to "__project__ __version__"\n :type label: str, optional\n :param authentication_url: TransIP authentication url,\n defaults to "https://api.transip.nl/v6/auth"\n :type authentication_url: str, optional\n :param root_endpoint: TransIP root of endpoints,\n defaults to "https://api.transip.nl/v6"\n :type root_endpoint: str, optional\n :param connection_timeout: timeout for the network response, defaults to 30\n :type connection_timeout: int, optional\n :param retry: retry when the call fails due to zone\n being saved or locked (409), defaults to 3\n :type retry: int, optional\n :param retry_delay: time in seconds to wait between retries,\n defaults to 5\n :type retry_delay: float, optional\n '
if ((login is not None) and (access_token is not None)):
raise ValueError('Either login and private_key or access token must be used, not both.')
self.attempts = (retry + 1)
self.retry_delay = retry_delay
self.root_endpoint = root_endpoint
self.connection_timeout = connection_timeout
if (access_token is None):
self._token = AccessToken(login=login, private_key=private_key_pem, private_key_file=private_key_pem_file, expiration_time=expiration_time, read_only=read_only, global_key=global_key, label=label, authentication_url=authentication_url, connection_timeout=connection_timeout)
else:
self._token = access_token | 9,129,667,430,303,725,000 | Initialize the interface with TransIP.
:param login: the TransIP login name, defaults to None
:type login: str, optional
:param private_key_pem: the private key as string, defaults to None
:type private_key_pem: str, optional
:param private_key_pem_file: file location of the private key, defaults to None
:type private_key_pem_file: Path, optional
:param access_token: JSON Web Token, defaults to None
:type access_token: str, optional
:param expiration_time: expiration time (TTL) of the access token,
defaults to 60
:type expiration_time: int, optional
:param read_only: key/token allows to change objects or only read,
defaults to False
:type read_only: bool, optional
:param global_key: key may only be used from whitelisted ip addresses,
defaults to False
:type global_key: bool, optional
:param label: textual identifier for the access token,
defaults to "__project__ __version__"
:type label: str, optional
:param authentication_url: TransIP authentication url,
defaults to "https://api.transip.nl/v6/auth"
:type authentication_url: str, optional
:param root_endpoint: TransIP root of endpoints,
defaults to "https://api.transip.nl/v6"
:type root_endpoint: str, optional
:param connection_timeout: timeout for the network response, defaults to 30
:type connection_timeout: int, optional
:param retry: retry when the call fails due to zone
being saved or locked (409), defaults to 3
:type retry: int, optional
:param retry_delay: time in seconds to wait between retries,
defaults to 5
:type retry_delay: float, optional | transip_dns/transip_interface.py | __init__ | bheuvel/transip_dns | python | def __init__(self, login: str=None, private_key_pem: str=None, private_key_pem_file: Path=None, access_token: str=None, expiration_time: int=60, read_only: bool=False, global_key: bool=False, label: str=f'{__project__} {__version__}', authentication_url: str='https://api.transip.nl/v6/auth', root_endpoint: str='https://api.transip.nl/v6', connection_timeout: int=30, retry: int=3, retry_delay: float=5):
'Initialize the interface with TransIP.\n\n :param login: the TransIP login name, defaults to None\n :type login: str, optional\n :param private_key_pem: the private key as string, defaults to None\n :type private_key_pem: str, optional\n :param private_key_pem_file: file location of the private key, defaults to None\n :type private_key_pem_file: Path, optional\n :param access_token: JSON Web Token, defaults to None\n :type access_token: str, optional\n :param expiration_time: expiration time (TTL) of the access token,\n defaults to 60\n :type expiration_time: int, optional\n :param read_only: key/token allows to change objects or only read,\n defaults to False\n :type read_only: bool, optional\n :param global_key: key may only be used from whitelisted ip addresses,\n defaults to False\n :type global_key: bool, optional\n :param label: textual identifier for the access token,\n defaults to "__project__ __version__"\n :type label: str, optional\n :param authentication_url: TransIP authentication url,\n defaults to "https://api.transip.nl/v6/auth"\n :type authentication_url: str, optional\n :param root_endpoint: TransIP root of endpoints,\n defaults to "https://api.transip.nl/v6"\n :type root_endpoint: str, optional\n :param connection_timeout: timeout for the network response, defaults to 30\n :type connection_timeout: int, optional\n :param retry: retry when the call fails due to zone\n being saved or locked (409), defaults to 3\n :type retry: int, optional\n :param retry_delay: time in seconds to wait between retries,\n defaults to 5\n :type retry_delay: float, optional\n '
if ((login is not None) and (access_token is not None)):
raise ValueError('Either login and private_key or access token must be used, not both.')
self.attempts = (retry + 1)
self.retry_delay = retry_delay
self.root_endpoint = root_endpoint
self.connection_timeout = connection_timeout
if (access_token is None):
self._token = AccessToken(login=login, private_key=private_key_pem, private_key_file=private_key_pem_file, expiration_time=expiration_time, read_only=read_only, global_key=global_key, label=label, authentication_url=authentication_url, connection_timeout=connection_timeout)
else:
self._token = access_token |
@property
def headers(self) -> Dict:
    'Generate the default headers.\n\n Note that the reference to "self._token" will always\n provide a valid (and renewed if needed) token\n\n :return: default headers, including the authentication token\n :rtype: Dict\n '
return {'Content-Type': 'application/json', 'Authorization': f'Bearer {self._token}', 'User-Agent': f'{__project__} {__version__}'} | -3,138,354,725,542,010,000 | Generate the default headers.
Note that the reference to "self._token" will always
provide a valid (and renewed if needed) token
:return: default headers, including the authentication token
:rtype: Dict | transip_dns/transip_interface.py | headers | bheuvel/transip_dns | python | @property
def headers(self) -> Dict:
    'Generate the default headers.\n\n Note that the reference to "self._token" will always\n provide a valid (and renewed if needed) token\n\n :return: default headers, including the authentication token\n :rtype: Dict\n '
return {'Content-Type': 'application/json', 'Authorization': f'Bearer {self._token}', 'User-Agent': f'{__project__} {__version__}'} |
def execute_dns_entry(self, method: str, rest_path: str, dnsentry: dict):
    'Execute the requested action, with retry on 409.\n\n 409: ~ "DNS Entries are currently being saved"\n 409: ~ "is locked"\n\n\n :param method: get, post, patch, delete\n :type method: str\n :param rest_path: REST path of the respective endpoint\n :type rest_path: str\n :param dnsentry: DNS entry to manage\n :type dnsentry: dict\n :raises requests.exceptions.HTTPError: Raise an error\n if a 400 or 500 response is returned\n :return: the requests response\n :rtype: requests.models.Response\n '
endpoint = f'{self.root_endpoint}{rest_path}'
request = getattr(requests, method)
response = None
for attempt in range(1, (self.attempts + 1)):
response = request(url=endpoint, json=dnsentry, headers=self.headers, timeout=self.connection_timeout)
if (response.status_code != 409):
response.raise_for_status()
logger.debug(f'API request returned {response.status_code}')
return response
        logger.debug(f'API request returned {response.status_code}: {response.text}, attempt {attempt} of {self.attempts}')
sleep(self.retry_delay)
response.raise_for_status() | 6,041,315,292,962,485,000 | Execute the requested action, with retry on 409.
409: ~ "DNS Entries are currently being saved"
409: ~ "is locked"
:param method: get, post, patch, delete
:type method: str
:param rest_path: REST path of the respective endpoint
:type rest_path: str
:param dnsentry: DNS entry to manage
:type dnsentry: dict
:raises requests.exceptions.HTTPError: Raise an error
if a 400 or 500 response is returned
:return: the requests response
:rtype: requests.models.Response | transip_dns/transip_interface.py | execute_dns_entry | bheuvel/transip_dns | python | def execute_dns_entry(self, method: str, rest_path: str, dnsentry: dict):
    'Execute the requested action, with retry on 409.\n\n 409: ~ "DNS Entries are currently being saved"\n 409: ~ "is locked"\n\n\n :param method: get, post, patch, delete\n :type method: str\n :param rest_path: REST path of the respective endpoint\n :type rest_path: str\n :param dnsentry: DNS entry to manage\n :type dnsentry: dict\n :raises requests.exceptions.HTTPError: Raise an error\n if a 400 or 500 response is returned\n :return: the requests response\n :rtype: requests.models.Response\n '
endpoint = f'{self.root_endpoint}{rest_path}'
request = getattr(requests, method)
response = None
for attempt in range(1, (self.attempts + 1)):
response = request(url=endpoint, json=dnsentry, headers=self.headers, timeout=self.connection_timeout)
if (response.status_code != 409):
response.raise_for_status()
logger.debug(f'API request returned {response.status_code}')
return response
        logger.debug(f'API request returned {response.status_code}: {response.text}, attempt {attempt} of {self.attempts}')
sleep(self.retry_delay)
response.raise_for_status() |
def domains(self) -> list:
'Get a listing of all available domains.\n\n [extended_summary]\n\n :return: List of available domains\n :rtype: list\n '
return self.execute_dns_entry('get', '/domains', None) | 2,543,835,990,408,454,000 | Get a listing of all available domains.
[extended_summary]
:return: List of available domains
:rtype: list | transip_dns/transip_interface.py | domains | bheuvel/transip_dns | python | def domains(self) -> list:
'Get a listing of all available domains.\n\n [extended_summary]\n\n :return: List of available domains\n :rtype: list\n '
return self.execute_dns_entry('get', '/domains', None) |
def get_dns_entry(self, dns_zone_name: str) -> Dict:
'Get a listing of the respective domain.'
response = self.execute_dns_entry('get', rest_path=f'/domains/{dns_zone_name}/dns', dnsentry=None)
return response | 8,240,839,006,028,131,000 | Get a listing of the respective domain. | transip_dns/transip_interface.py | get_dns_entry | bheuvel/transip_dns | python | def get_dns_entry(self, dns_zone_name: str) -> Dict:
response = self.execute_dns_entry('get', rest_path=f'/domains/{dns_zone_name}/dns', dnsentry=None)
return response |
def post_dns_entry(self, dns_record: DnsRecord):
'Add a dnsEntry to the respective domain.'
return self.execute_dns_entry('post', rest_path=f'/domains/{dns_record.zone}/dns', dnsentry=dns_record.dnsentry) | 4,100,742,298,039,812,600 | Add a dnsEntry to the respective domain. | transip_dns/transip_interface.py | post_dns_entry | bheuvel/transip_dns | python | def post_dns_entry(self, dns_record: DnsRecord):
return self.execute_dns_entry('post', rest_path=f'/domains/{dns_record.zone}/dns', dnsentry=dns_record.dnsentry) |
def patch_dns_entry(self, dns_record: DnsRecord):
'Adjust a record in the respective domain.'
return self.execute_dns_entry('patch', rest_path=f'/domains/{dns_record.zone}/dns', dnsentry=dns_record.dnsentry) | -5,313,160,516,114,981,000 | Adjust a record in the respective domain. | transip_dns/transip_interface.py | patch_dns_entry | bheuvel/transip_dns | python | def patch_dns_entry(self, dns_record: DnsRecord):
return self.execute_dns_entry('patch', rest_path=f'/domains/{dns_record.zone}/dns', dnsentry=dns_record.dnsentry) |
def delete_dns_entry(self, dns_record: DnsRecord):
'Delete an entry in the respective domain.'
return self.execute_dns_entry('delete', rest_path=f'/domains/{dns_record.zone}/dns', dnsentry=dns_record.dnsentry) | 756,937,102,717,403,100 | Delete an entry in the respective domain. | transip_dns/transip_interface.py | delete_dns_entry | bheuvel/transip_dns | python | def delete_dns_entry(self, dns_record: DnsRecord):
return self.execute_dns_entry('delete', rest_path=f'/domains/{dns_record.zone}/dns', dnsentry=dns_record.dnsentry) |
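Putting the pieces together, a hypothetical end-to-end use of the interface could look like the sketch below; the token, zone, and record values are placeholders.

record = DnsRecord(name='www', rtype='A', expire='300',
                   content='203.0.113.10', zone='example.com')
api = TransipInterface(access_token='<jwt-from-transip>')
api.post_dns_entry(record)     # create the record
record.content = '203.0.113.11'
api.patch_dns_entry(record)    # update its content
api.delete_dns_entry(record)   # and remove it again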
def GenerateDateParamYql(params):
    '[Check consistency in the date parameters and generate the date yql parameters]\n \n Arguments:\n params {[type]} -- [description]\n \n Returns:\n [type] -- [description]\n '
yql = ''
if ('fromDate' in params):
if params['fromDate'].isdigit():
yql += (' and published_date > ' + str(params['fromDate']))
if ('toDate' in params):
if params['toDate'].isdigit():
yql += (' and published_date < ' + str(params['toDate']))
if (('fromDate' in params) and ('toDate' in params)):
if (params['fromDate'].isdigit() and params['toDate'].isdigit()):
if (params['fromDate'] > params['toDate']):
return ''
    return yql | -8,374,745,931,147,657,000 | [Check consistency in the date parameters and generate the date yql parameters]
Arguments:
params {[type]} -- [description]
Returns:
[type] -- [description] | news_api/endpoints/vespaSearcher.py | GenerateDateParamYql | rdoume/News_API | python | def GenerateDateParamYql(params):
    '[Check consistency in the date parameters and generate the date yql parameters]\n \n Arguments:\n params {[type]} -- [description]\n \n Returns:\n [type] -- [description]\n '
    yql = ''
if ('fromDate' in params):
if params['fromDate'].isdigit():
yql += (' and published_date > ' + str(params['fromDate']))
if ('toDate' in params):
if params['toDate'].isdigit():
yql += (' and published_date < ' + str(params['toDate']))
if (('fromDate' in params) and ('toDate' in params)):
if (params['fromDate'].isdigit() and params['toDate'].isdigit()):
if (params['fromDate'] > params['toDate']):
                return ''
return yql |
def GenerateNewsYql(params):
    '[Generator of a YQL vespa query, to build a refined request on the vespa cluster]\n In this case, the YQL depends on the search definition of the document type in the vespa cluster\n Modify at your own risk; the position of some parameters, such as limit, is important\n \n Returns:\n yql [string] -- [String that selects documents based on userQuery\n '
yql = '&yql=select * from sources * where userQuery()'
if ('source' in params):
yql += ((' and hostsite contains " ' + params['source']) + ' "')
if ('language' in params):
yql += ((' and country contains "' + params['language']) + '"')
yql += GenerateDateParamYql(params)
if ('count' in params):
if params['count'].isdigit():
yql += (' limit ' + str(params['count']))
if ('offset' in params):
if params['offset'].isdigit():
yql += (' offset ' + str(params['offset']))
    return yql | -753,313,291,014,727,800 | [Generator of a YQL vespa query, to build a refined request on the vespa cluster]
In this case, the YQL depends on the search definition of the document type in the vespa cluster
Modify at your own risk; the position of some parameters, such as limit, is important
Returns:
yql [string] -- [String that selects documents based on userQuery | news_api/endpoints/vespaSearcher.py | GenerateNewsYql | rdoume/News_API | python | def GenerateNewsYql(params):
    '[Generator of a YQL vespa query, to build a refined request on the vespa cluster]\n In this case, the YQL depends on the search definition of the document type in the vespa cluster\n Modify at your own risk; the position of some parameters, such as limit, is important\n \n Returns:\n yql [string] -- [String that selects documents based on userQuery\n '
yql = '&yql=select * from sources * where userQuery()'
if ('source' in params):
yql += ((' and hostsite contains " ' + params['source']) + ' "')
if ('language' in params):
yql += ((' and country contains "' + params['language']) + '"')
yql += GenerateDateParamYql(params)
if ('count' in params):
if params['count'].isdigit():
yql += (' limit ' + str(params['count']))
if ('offset' in params):
if params['offset'].isdigit():
yql += (' offset ' + str(params['offset']))
return yql |
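A worked example of the YQL string the helper above produces, traced directly from the code:

params = {'query': 'fusion energy', 'language': 'en', 'count': '5'}
print(GenerateNewsYql(params))
# &yql=select * from sources * where userQuery() and country contains "en" limit 5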
def vespaSearch(params):
    'Search Function for Vespa:\n \n \n Arguments:\n params {dict} -- [Dict containing all parameters for the Vespa Search]\n List of accepted params:\n :param query: User query to search (required)\n :param toDate: Maximum date limit for the publication date (optional, default = now() )\n :param fromDate: Minimum date limit for the publication date (optional)\n :param count: Number of documents to retrieve (optional, default = 10)\n :param offset: Offset for the retrieved documents (optional, default = 0)\n :param source: Filter for the accepted hostsites (optional)\n '
result = None
if ('query' not in params):
return None
else:
yql = GenerateNewsYql(params)
try:
print(((((((('http://' + VESPA_IP) + ':') + VESPA_PORT) + '/search/?query=') + params['query']) + yql) + ';'))
result_request = requests.get(((((((('http://' + VESPA_IP) + ':') + VESPA_PORT) + '/search/?query=') + params['query']) + yql) + ';'))
if (result_request.status_code == 200):
result = result_request.json()
except Exception as e:
print(e)
return None
return result | 3,787,824,880,471,384,600 | Search Function for Vespa:
Arguments:
params {dict} -- [Dict containing all parameters for the Vespa Search]
List of accepted params:
:param query: User query to search (required)
:param toDate: Maximum date limit for the publication date (optional, default = now() )
:param fromDate: Minimum date limit for the publication date (optional)
:param count: Number of documents to retrieve (optional, default = 10)
:param offset: Offset for the retrieved documents (optional, default = 0)
:param source: Filter for the accepted hostsites (optional) | news_api/endpoints/vespaSearcher.py | vespaSearch | rdoume/News_API | python | def vespaSearch(params):
    'Search Function for Vespa:\n \n \n Arguments:\n params {dict} -- [Dict containing all parameters for the Vespa Search]\n List of accepted params:\n :param query: User query to search (required)\n :param toDate: Maximum date limit for the publication date (optional, default = now() )\n :param fromDate: Minimum date limit for the publication date (optional)\n :param count: Number of documents to retrieve (optional, default = 10)\n :param offset: Offset for the retrieved documents (optional, default = 0)\n :param source: Filter for the accepted hostsites (optional)\n '
result = None
if ('query' not in params):
return None
else:
yql = GenerateNewsYql(params)
try:
print(((((((('http://' + VESPA_IP) + ':') + VESPA_PORT) + '/search/?query=') + params['query']) + yql) + ';'))
result_request = requests.get(((((((('http://' + VESPA_IP) + ':') + VESPA_PORT) + '/search/?query=') + params['query']) + yql) + ';'))
if (result_request.status_code == 200):
result = result_request.json()
except Exception as e:
print(e)
return None
return result |
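Sketch of a call, assuming VESPA_IP and VESPA_PORT point at a reachable Vespa cluster; the field access follows the standard Vespa result envelope.

results = vespaSearch({'query': 'climate', 'language': 'en', 'count': '10'})
if results is not None:
    print(results['root']['fields']['totalCount'])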
def test_cpu_memory_usage(duthosts, enum_rand_one_per_hwsku_hostname, setup_thresholds):
'Check DUT memory usage and process cpu usage are within threshold.'
duthost = duthosts[enum_rand_one_per_hwsku_hostname]
MonitResult = namedtuple('MonitResult', ['processes', 'memory'])
monit_results = duthost.monit_process(iterations=24)['monit_results']
(memory_threshold, normal_cpu_threshold, high_cpu_consume_procs) = setup_thresholds
persist_threshold = 8
outstanding_mem_polls = {}
outstanding_procs = {}
outstanding_procs_counter = Counter()
for (i, monit_result) in enumerate((MonitResult(*_) for _ in monit_results)):
logging.debug('------ Iteration %d ------', i)
check_memory(i, memory_threshold, monit_result, outstanding_mem_polls)
for proc in monit_result.processes:
cpu_threshold = normal_cpu_threshold
if (proc['name'] in high_cpu_consume_procs):
cpu_threshold = high_cpu_consume_procs[proc['name']]
check_cpu_usage(cpu_threshold, outstanding_procs, outstanding_procs_counter, proc)
analyse_monitoring_results(cpu_threshold, memory_threshold, outstanding_mem_polls, outstanding_procs, outstanding_procs_counter, persist_threshold) | 1,628,707,251,410,047,700 | Check DUT memory usage and process cpu usage are within threshold. | tests/platform_tests/test_cpu_memory_usage.py | test_cpu_memory_usage | jsanghra/sonic-mgmt | python | def test_cpu_memory_usage(duthosts, enum_rand_one_per_hwsku_hostname, setup_thresholds):
duthost = duthosts[enum_rand_one_per_hwsku_hostname]
MonitResult = namedtuple('MonitResult', ['processes', 'memory'])
monit_results = duthost.monit_process(iterations=24)['monit_results']
(memory_threshold, normal_cpu_threshold, high_cpu_consume_procs) = setup_thresholds
persist_threshold = 8
outstanding_mem_polls = {}
outstanding_procs = {}
outstanding_procs_counter = Counter()
for (i, monit_result) in enumerate((MonitResult(*_) for _ in monit_results)):
logging.debug('------ Iteration %d ------', i)
check_memory(i, memory_threshold, monit_result, outstanding_mem_polls)
for proc in monit_result.processes:
cpu_threshold = normal_cpu_threshold
if (proc['name'] in high_cpu_consume_procs):
cpu_threshold = high_cpu_consume_procs[proc['name']]
check_cpu_usage(cpu_threshold, outstanding_procs, outstanding_procs_counter, proc)
analyse_monitoring_results(cpu_threshold, memory_threshold, outstanding_mem_polls, outstanding_procs, outstanding_procs_counter, persist_threshold) |
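A tiny sketch of the unpacking idiom used above: each raw monit result is splatted into the MonitResult namedtuple; the sample payload is invented for illustration.

from collections import namedtuple
MonitResult = namedtuple('MonitResult', ['processes', 'memory'])
raw = [([{'name': 'snmpd', 'cpu_percent': 1.2}], {'memory_usage': 41.0})]
for i, monit_result in enumerate(MonitResult(*_) for _ in raw):
    print(i, monit_result.memory, monit_result.processes[0]['name'])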
def test_cpu_memory_usage_counterpoll(duthosts, enum_rand_one_per_hwsku_hostname, setup_thresholds, restore_counter_poll, counterpoll_type, counterpoll_cpu_threshold):
    'Check DUT memory usage and process cpu usage are within threshold.\n Disable all counterpoll types except the tested one\n Collect memory and CPU usage for 60 secs\n Compare the memory usage with the memory threshold\n Compare the average cpu usage with the cpu threshold for the specified process\n Restore counterpolls status\n '
duthost = duthosts[enum_rand_one_per_hwsku_hostname]
program_to_check = get_manufacturer_program_to_check(duthost)
if (program_to_check is None):
pytest.skip('Skip no program is offered to check')
(memory_threshold, _, _) = setup_thresholds
counterpoll_cpu_usage_threshold = counterpoll_cpu_threshold[counterpoll_type]
MonitResult = namedtuple('MonitResult', ['processes', 'memory'])
disable_all_counterpoll_type_except_tested(duthost, counterpoll_type)
monit_results = duthost.monit_process(iterations=60, delay_interval=1)['monit_results']
poll_interval = (CounterpollConstants.COUNTERPOLL_INTERVAL[counterpoll_type] // 1000)
outstanding_mem_polls = {}
outstanding_procs = {}
outstanding_procs_counter = Counter()
cpu_usage_program_to_check = []
prepare_ram_cpu_usage_results(MonitResult, counterpoll_cpu_usage_threshold, memory_threshold, monit_results, outstanding_mem_polls, outstanding_procs, outstanding_procs_counter, program_to_check, cpu_usage_program_to_check)
log_cpu_usage_by_vendor(cpu_usage_program_to_check, counterpoll_type)
cpu_usage_average = caculate_cpu_usge_average_value(extract_valid_cpu_usage_data(cpu_usage_program_to_check, poll_interval), cpu_usage_program_to_check)
logging.info('Average cpu_usage is {}'.format(cpu_usage_average))
assert (cpu_usage_average < counterpoll_cpu_usage_threshold), 'cpu_usage_average of {} exceeds the cpu threshold:{}'.format(program_to_check, counterpoll_cpu_usage_threshold)
assert (not outstanding_mem_polls), ' Memory {} exceeds the memory threshold {} '.format(outstanding_mem_polls, memory_threshold) | -5,749,021,165,275,632,000 | Check DUT memory usage and process cpu usage are within threshold.
Disable all counterpoll types except the tested one
Collect memory and CPU usage for 60 secs
Compare the memory usage with the memory threshold
Compare the average cpu usage with the cpu threshold for the specified process
Restore counterpolls status | tests/platform_tests/test_cpu_memory_usage.py | test_cpu_memory_usage_counterpoll | jsanghra/sonic-mgmt | python | def test_cpu_memory_usage_counterpoll(duthosts, enum_rand_one_per_hwsku_hostname, setup_thresholds, restore_counter_poll, counterpoll_type, counterpoll_cpu_threshold):
    'Check DUT memory usage and process cpu usage are within threshold.\n Disable all counterpoll types except the tested one\n Collect memory and CPU usage for 60 secs\n Compare the memory usage with the memory threshold\n Compare the average cpu usage with the cpu threshold for the specified process\n Restore counterpolls status\n '
duthost = duthosts[enum_rand_one_per_hwsku_hostname]
program_to_check = get_manufacturer_program_to_check(duthost)
if (program_to_check is None):
pytest.skip('Skip no program is offered to check')
(memory_threshold, _, _) = setup_thresholds
counterpoll_cpu_usage_threshold = counterpoll_cpu_threshold[counterpoll_type]
MonitResult = namedtuple('MonitResult', ['processes', 'memory'])
disable_all_counterpoll_type_except_tested(duthost, counterpoll_type)
monit_results = duthost.monit_process(iterations=60, delay_interval=1)['monit_results']
poll_interval = (CounterpollConstants.COUNTERPOLL_INTERVAL[counterpoll_type] // 1000)
outstanding_mem_polls = {}
outstanding_procs = {}
outstanding_procs_counter = Counter()
cpu_usage_program_to_check = []
prepare_ram_cpu_usage_results(MonitResult, counterpoll_cpu_usage_threshold, memory_threshold, monit_results, outstanding_mem_polls, outstanding_procs, outstanding_procs_counter, program_to_check, cpu_usage_program_to_check)
log_cpu_usage_by_vendor(cpu_usage_program_to_check, counterpoll_type)
cpu_usage_average = caculate_cpu_usge_average_value(extract_valid_cpu_usage_data(cpu_usage_program_to_check, poll_interval), cpu_usage_program_to_check)
logging.info('Average cpu_usage is {}'.format(cpu_usage_average))
assert (cpu_usage_average < counterpoll_cpu_usage_threshold), 'cpu_usage_average of {} exceeds the cpu threshold:{}'.format(program_to_check, counterpoll_cpu_usage_threshold)
assert (not outstanding_mem_polls), ' Memory {} exceeds the memory threshold {} '.format(outstanding_mem_polls, memory_threshold) |
def extract_valid_cpu_usage_data(program_to_check_cpu_usage, poll_interval):
    '\n This method is to extract the valid cpu usage data according to the poll_interval\n 1. Find the index for the max one for every poll interval,\n 2. Discard the data if the index is on the edge (0 or the length of program_to_check_cpu_usage - 1)\n 3. If the index is adjacent to one in the neighbouring interval, only keep the former one\n 4. Return all indexes\n For example:\n poll_interval = 10\n 7, 1, 0, 1, 0, 1, 5, 1, 1,2, 0, 1, 0, 1, 0, 6, 1, 1, 1,2\n return [15]\n 0, 1, 0, 1, 0, 1, 0, 1, 0, 8, 7, 1, 0, 1, 0, 6, 1, 1, 1,2\n return [9]\n '
valid_cpu_usage_center_index_list = []
poll_number = (len(program_to_check_cpu_usage) // poll_interval)
def find_max_cpu_usage(cpu_usage_list, poll_times):
max_cpu_usage = cpu_usage_list[0]
max_cpu_usage_index = 0
for (i, cpu_usage) in enumerate(cpu_usage_list):
if (cpu_usage > max_cpu_usage):
max_cpu_usage = cpu_usage
max_cpu_usage_index = i
return [max_cpu_usage, (max_cpu_usage_index + (poll_times * poll_interval))]
for i in range(0, poll_number):
(max_cpu_usage, max_cpu_usage_index) = find_max_cpu_usage(program_to_check_cpu_usage[(poll_interval * i):(poll_interval * (i + 1))], i)
if ((max_cpu_usage_index == 0) or (max_cpu_usage_index == (len(program_to_check_cpu_usage) - 1))):
logging.info('The data is on the edge:{}, discard it '.format(max_cpu_usage_index))
else:
if (valid_cpu_usage_center_index_list and ((valid_cpu_usage_center_index_list[(- 1)] + 1) == max_cpu_usage_index)):
continue
valid_cpu_usage_center_index_list.append(max_cpu_usage_index)
    return valid_cpu_usage_center_index_list | -8,353,977,908,569,143,000 | This method is to extract the valid cpu usage data according to the poll_interval
1. Find the index for the max one for every poll interval,
2. Discard the data if the index is on the edge (0 or the length of program_to_check_cpu_usage - 1)
3. If the index is adjacent to one in the neighbouring interval, only keep the former one
4. Return all indexes
For example:
poll_interval = 10
7, 1, 0, 1, 0, 1, 5, 1, 1,2, 0, 1, 0, 1, 0, 6, 1, 1, 1,2
return [15]
0, 1, 0, 1, 0, 1, 0, 1, 0, 8, 7, 1, 0, 1, 0, 6, 1, 1, 1,2
return [9] | tests/platform_tests/test_cpu_memory_usage.py | extract_valid_cpu_usage_data | jsanghra/sonic-mgmt | python | def extract_valid_cpu_usage_data(program_to_check_cpu_usage, poll_interval):
    '\n This method is to extract the valid cpu usage data according to the poll_interval\n 1. Find the index for the max one for every poll interval,\n 2. Discard the data if the index is on the edge (0 or the length of program_to_check_cpu_usage - 1)\n 3. If the index is adjacent to one in the neighbouring interval, only keep the former one\n 4. Return all indexes\n For example:\n poll_interval = 10\n 7, 1, 0, 1, 0, 1, 5, 1, 1,2, 0, 1, 0, 1, 0, 6, 1, 1, 1,2\n return [15]\n 0, 1, 0, 1, 0, 1, 0, 1, 0, 8, 7, 1, 0, 1, 0, 6, 1, 1, 1,2\n return [9]\n '
valid_cpu_usage_center_index_list = []
poll_number = (len(program_to_check_cpu_usage) // poll_interval)
def find_max_cpu_usage(cpu_usage_list, poll_times):
max_cpu_usage = cpu_usage_list[0]
max_cpu_usage_index = 0
for (i, cpu_usage) in enumerate(cpu_usage_list):
if (cpu_usage > max_cpu_usage):
max_cpu_usage = cpu_usage
max_cpu_usage_index = i
return [max_cpu_usage, (max_cpu_usage_index + (poll_times * poll_interval))]
for i in range(0, poll_number):
(max_cpu_usage, max_cpu_usage_index) = find_max_cpu_usage(program_to_check_cpu_usage[(poll_interval * i):(poll_interval * (i + 1))], i)
if ((max_cpu_usage_index == 0) or (max_cpu_usage_index == (len(program_to_check_cpu_usage) - 1))):
logging.info('The data is on the edge:{}, discard it '.format(max_cpu_usage_index))
else:
if (valid_cpu_usage_center_index_list and ((valid_cpu_usage_center_index_list[(- 1)] + 1) == max_cpu_usage_index)):
continue
valid_cpu_usage_center_index_list.append(max_cpu_usage_index)
return valid_cpu_usage_center_index_list |
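The docstring's worked examples, made runnable against the function above:

data1 = [7, 1, 0, 1, 0, 1, 5, 1, 1, 2, 0, 1, 0, 1, 0, 6, 1, 1, 1, 2]
print(extract_valid_cpu_usage_data(data1, 10))  # [15]: the 7 at index 0 sits on the edge
data2 = [0, 1, 0, 1, 0, 1, 0, 1, 0, 8, 7, 1, 0, 1, 0, 6, 1, 1, 1, 2]
print(extract_valid_cpu_usage_data(data2, 10))  # [9]: index 10 is adjacent to 9, so it is skipped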
def _get_filename(filepath):
' get filename from path '
return str(Path(filepath).name) | -2,063,787,766,986,508,800 | get filename from path | pyImageCropper/pyImageCropper.py | _get_filename | Writ3r/pyImageCropper | python | def _get_filename(filepath):
' '
return str(Path(filepath).name) |
def _scale_image(img, maxLen, maxHeight):
' scale image to under the specified maxLen and maxHeight '
scale = 1
resized_img = img.copy()
if (resized_img.size[0] > maxLen):
resize = (resized_img.size[0] / maxLen)
answer = (int((resized_img.size[0] / resize)), int((resized_img.size[1] / resize)))
scale = resize
resized_img = resized_img.resize(answer, Image.ANTIALIAS)
if (resized_img.size[1] > maxHeight):
resize = (resized_img.size[1] / maxHeight)
answer = (int((resized_img.size[0] / resize)), int((resized_img.size[1] / resize)))
scale = (scale * resize)
resized_img = resized_img.resize(answer, Image.ANTIALIAS)
return (resized_img, scale) | 1,490,627,980,130,205,000 | scale image to under the specified maxLen and maxHeight | pyImageCropper/pyImageCropper.py | _scale_image | Writ3r/pyImageCropper | python | def _scale_image(img, maxLen, maxHeight):
' '
scale = 1
resized_img = img.copy()
if (resized_img.size[0] > maxLen):
resize = (resized_img.size[0] / maxLen)
answer = (int((resized_img.size[0] / resize)), int((resized_img.size[1] / resize)))
scale = resize
resized_img = resized_img.resize(answer, Image.ANTIALIAS)
if (resized_img.size[1] > maxHeight):
resize = (resized_img.size[1] / maxHeight)
answer = (int((resized_img.size[0] / resize)), int((resized_img.size[1] / resize)))
scale = (scale * resize)
resized_img = resized_img.resize(answer, Image.ANTIALIAS)
return (resized_img, scale) |
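Illustrative use of the scaler above (requires Pillow); the dimensions were chosen to exercise the width branch only.

from PIL import Image
img = Image.new('RGB', (4000, 1000))
resized, scale = _scale_image(img, 1000, 800)
# resized.size == (1000, 250) and scale == 4.0: one resized pixel spans 4 original pixels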
def _point_on_image(point, img):
' check if point is on the image '
(x, y) = point
if ((x >= 0) and (x <= img.size[0])):
if ((y >= 0) and (y <= img.size[1])):
return True
return False | 440,278,461,843,085,300 | check if point is on the image | pyImageCropper/pyImageCropper.py | _point_on_image | Writ3r/pyImageCropper | python | def _point_on_image(point, img):
' '
(x, y) = point
if ((x >= 0) and (x <= img.size[0])):
if ((y >= 0) and (y <= img.size[1])):
return True
return False |
def roll_image(self, imgLoc):
' changes canvas to a new image '
self.img = Image.open(imgLoc)
(self.resized_img, self.scale) = _scale_image(self.img, self.canvasWidth, self.canvasHeight)
self.currImage = ImageTk.PhotoImage(self.resized_img)
self.canvas.delete('all')
self.canvas.config(width=self.resized_img.size[0], height=self.resized_img.size[1])
self.canvas_image = self.canvas.create_image(0, 0, anchor=tkinter.NW, image=self.currImage)
self.canvas.pack(fill=tkinter.BOTH, expand=tkinter.YES)
self._build_crop_box()
self._refresh_crop_rectangle() | -1,111,047,446,190,721,800 | changes canvas to a new image | pyImageCropper/pyImageCropper.py | roll_image | Writ3r/pyImageCropper | python | def roll_image(self, imgLoc):
' '
self.img = Image.open(imgLoc)
(self.resized_img, self.scale) = _scale_image(self.img, self.canvasWidth, self.canvasHeight)
self.currImage = ImageTk.PhotoImage(self.resized_img)
self.canvas.delete('all')
self.canvas.config(width=self.resized_img.size[0], height=self.resized_img.size[1])
self.canvas_image = self.canvas.create_image(0, 0, anchor=tkinter.NW, image=self.currImage)
self.canvas.pack(fill=tkinter.BOTH, expand=tkinter.YES)
self._build_crop_box()
self._refresh_crop_rectangle() |
def _build_crop_box(self):
' creates the box for the crop rectangle x1,y1,x2,y2'
boxMax = min(self.resized_img.size[0], self.resized_img.size[1])
newImgLen = (boxMax - (boxMax % (self.boxBasePx / self.scale)))
self.box = [0, 0, newImgLen, newImgLen] | 4,377,888,991,730,752,500 | creates the box for the crop rectangle x1,y1,x2,y2 | pyImageCropper/pyImageCropper.py | _build_crop_box | Writ3r/pyImageCropper | python | def _build_crop_box(self):
' '
boxMax = min(self.resized_img.size[0], self.resized_img.size[1])
newImgLen = (boxMax - (boxMax % (self.boxBasePx / self.scale)))
self.box = [0, 0, newImgLen, newImgLen] |
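A sketch of the snapping arithmetic above; boxBasePx = 64 and scale = 2.0 are assumed example values, not constants taken from the class.

boxMax = 501                           # shorter side of the resized image, in px
step = 64 / 2.0                        # boxBasePx / self.scale
newImgLen = boxMax - (boxMax % step)   # 480.0: snapped down to a multiple of 32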
def _refresh_crop_rectangle(self, deltaX=0, deltaY=0):
' re-builds the crop rectangle based on the specified box '
if ((self.rectangle and (deltaX > 0)) or (deltaY > 0)):
self.canvas.move(self.rectangle, deltaX, deltaY)
else:
self.canvas.delete(self.rectangle)
self.rectangle = self.canvas.create_rectangle(self.box[0], self.box[1], self.box[2], self.box[3], outline='red', width=2) | -7,967,907,055,618,979,000 | re-builds the crop rectangle based on the specified box | pyImageCropper/pyImageCropper.py | _refresh_crop_rectangle | Writ3r/pyImageCropper | python | def _refresh_crop_rectangle(self, deltaX=0, deltaY=0):
' '
if ((self.rectangle and (deltaX > 0)) or (deltaY > 0)):
self.canvas.move(self.rectangle, deltaX, deltaY)
else:
self.canvas.delete(self.rectangle)
self.rectangle = self.canvas.create_rectangle(self.box[0], self.box[1], self.box[2], self.box[3], outline='red', width=2) |
def _on_mouse_down(self, event):
' if mouse clicked on crop area, allow moving crop '
if ((event.x >= self.box[0]) and (event.x <= self.box[2])):
if ((event.y >= self.box[1]) and (event.y <= self.box[3])):
self.movingCrop = True
self.lastLocation = [event.x, event.y] | -6,490,509,508,986,440,000 | if mouse clicked on crop area, allow moving crop | pyImageCropper/pyImageCropper.py | _on_mouse_down | Writ3r/pyImageCropper | python | def _on_mouse_down(self, event):
' '
if ((event.x >= self.box[0]) and (event.x <= self.box[2])):
if ((event.y >= self.box[1]) and (event.y <= self.box[3])):
self.movingCrop = True
self.lastLocation = [event.x, event.y] |
def _on_mouse_release(self, event):
' stop allowing movement of crop area '
self._on_mouse_move(event)
self.movingCrop = False | 188,714,077,504,388,600 | stop allowing movement of crop area | pyImageCropper/pyImageCropper.py | _on_mouse_release | Writ3r/pyImageCropper | python | def _on_mouse_release(self, event):
' '
self._on_mouse_move(event)
self.movingCrop = False |
def _on_mouse_move(self, event):
" move crop along with the user's mouse "
if self.movingCrop:
if _point_on_image((event.x, event.y), self.resized_img):
deltaX = (event.x - self.lastLocation[0])
deltaY = (event.y - self.lastLocation[1])
if ((self.box[0] + deltaX) < 0):
deltaX = 0
if ((self.box[1] + deltaY) < 0):
deltaY = 0
if ((self.box[2] + deltaX) > self.resized_img.size[0]):
deltaX = (self.resized_img.size[0] - self.box[2])
if ((self.box[3] + deltaY) > self.resized_img.size[1]):
deltaY = (self.resized_img.size[1] - self.box[3])
self.box = [(self.box[0] + deltaX), (self.box[1] + deltaY), (self.box[2] + deltaX), (self.box[3] + deltaY)]
self._refresh_crop_rectangle(deltaX, deltaY)
self.lastLocation = [event.x, event.y] | 8,556,816,214,101,553,000 | move crop along with the user's mouse | pyImageCropper/pyImageCropper.py | _on_mouse_move | Writ3r/pyImageCropper | python | def _on_mouse_move(self, event):
" "
if self.movingCrop:
if _point_on_image((event.x, event.y), self.resized_img):
deltaX = (event.x - self.lastLocation[0])
deltaY = (event.y - self.lastLocation[1])
if ((self.box[0] + deltaX) < 0):
deltaX = 0
if ((self.box[1] + deltaY) < 0):
deltaY = 0
if ((self.box[2] + deltaX) > self.resized_img.size[0]):
deltaX = (self.resized_img.size[0] - self.box[2])
if ((self.box[3] + deltaY) > self.resized_img.size[1]):
deltaY = (self.resized_img.size[1] - self.box[3])
self.box = [(self.box[0] + deltaX), (self.box[1] + deltaY), (self.box[2] + deltaX), (self.box[3] + deltaY)]
self._refresh_crop_rectangle(deltaX, deltaY)
self.lastLocation = [event.x, event.y] |
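The four guards above are a clamp that keeps the box inside the image. A standalone version of the same arithmetic, with illustrative numbers:
def clamp_delta(box, dx, dy, width, height):
    # box is [x1, y1, x2, y2]; the result keeps it fully on the image
    dx = max(dx, -box[0])
    dy = max(dy, -box[1])
    dx = min(dx, width - box[2])
    dy = min(dy, height - box[3])
    return dx, dy

print(clamp_delta([350, 0, 450, 100], 150, -20, 500, 300))   # (50, 0)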
def reset_module(self):
' reset all local vars\n Args:\n None\n Returns:\n None\n '
for mesh in self._mesh_obj_list:
mesh.reset_module()
del mesh
self.reset_base()
self._mesh_list = []
self._mesh_obj_list = [] | 7,837,867,828,147,708,000 | reset all local vars
Args:
None
Returns:
None | src/assets/handle/TSSMeshHandle.py | reset_module | 5trobl/oaisys | python | def reset_module(self):
' reset all local vars\n Args:\n None\n Returns:\n None\n '
for mesh in self._mesh_obj_list:
mesh.reset_module()
del mesh
self.reset_base()
self._mesh_list = []
self._mesh_obj_list = [] |
def activate_pass(self, pass_name, pass_cfg, keyframe=(- 1)):
' enables specific pass\n Args:\n pass_name: name of pass to activate [string]\n pass_cfg: specific parameters for the pass [dict]\n keyframe: current frame number; if value > -1, this should enable also the setting of a keyframe [int]\n Returns:\n None\n '
for mesh in self._mesh_obj_list:
mesh.activate_pass(pass_name=pass_name, pass_cfg=pass_cfg, keyframe=keyframe) | -5,100,791,950,728,135,000 | enables specific pass
Args:
pass_name: name of pass to activate [string]
pass_cfg: specific parameters for the pass [dict]
keyframe: current frame number; if value > -1, this should enable also the setting of a keyframe [int]
Returns:
None | src/assets/handle/TSSMeshHandle.py | activate_pass | 5trobl/oaisys | python | def activate_pass(self, pass_name, pass_cfg, keyframe=(- 1)):
' enables specific pass\n Args:\n pass_name: name of pass to activate [string]\n pass_cfg: specific parameters for the pass [dict]\n keyframe: current frame number; if value > -1, this should enable also the setting of a keyframe [int]\n Returns:\n None\n '
for mesh in self._mesh_obj_list:
mesh.activate_pass(pass_name=pass_name, pass_cfg=pass_cfg, keyframe=keyframe) |
def create(self, stage_dict):
' create function\n Args:\n stage_dict: dict of stages [dict]\n Returns:\n None\n '
self._create_meshes(cfg=self._cfg['MESHES'], general_cfg=self._cfg['GENERAL'], stage_dict=stage_dict) | 3,310,184,757,457,842,700 | create function
Args:
stage_dict: dict of stages [dict]
Returns:
None | src/assets/handle/TSSMeshHandle.py | create | 5trobl/oaisys | python | def create(self, stage_dict):
' create function\n Args:\n stage_dict: dict of stages [dict]\n Returns:\n None\n '
self._create_meshes(cfg=self._cfg['MESHES'], general_cfg=self._cfg['GENERAL'], stage_dict=stage_dict) |
def _create_meshes(self, cfg, general_cfg, stage_dict):
' create function\n Args:\n cfg: list of mesh cfgs [list]\n general_cfg: general cfg [dict]\n stage_dict: dict of stages [dict]\n Returns:\n success code [boolean]\n '
_current_instance_label_count = 0
for (ii, mesh) in enumerate(cfg):
try:
_module_name = ('src.assets.meshes.' + mesh['type'])
_module = importlib.import_module(_module_name)
_class = getattr(_module, mesh['type'])
_mesh = _class()
_mesh.set_general_cfg(cfg=general_cfg)
_mesh.set_stage_dict(stage_dict=stage_dict)
mesh['meshParams']['name'] = mesh['name']
_mesh.update_cfg(cfg=mesh['meshParams'])
(_instance_count, _instance_label_count) = _mesh.create(instance_id_offset=_current_instance_label_count)
_current_instance_label_count += _instance_label_count
self._mesh_obj_list.append(_mesh)
self._mesh_list.append(_mesh.get_meshes())
except ImportError:
raise Exception('Cannot add mesh')
return 0 | 6,563,732,470,305,347,000 | create function
Args:
cfg: list of mesh cfgs [list]
general_cfg: general cfg [dict]
stage_dict: dict of stages [dict]
Returns:
success code [boolean] | src/assets/handle/TSSMeshHandle.py | _create_meshes | 5trobl/oaisys | python | def _create_meshes(self, cfg, general_cfg, stage_dict):
' create function\n Args:\n cfg: list of mesh cfgs [list]\n general_cfg: general cfg [dict]\n stage_dict: dict of stages [dict]\n Returns:\n success code [boolean]\n '
_current_instance_label_count = 0
for (ii, mesh) in enumerate(cfg):
try:
_module_name = ('src.assets.meshes.' + mesh['type'])
_module = importlib.import_module(_module_name)
_class = getattr(_module, mesh['type'])
_mesh = _class()
_mesh.set_general_cfg(cfg=general_cfg)
_mesh.set_stage_dict(stage_dict=stage_dict)
mesh['meshParams']['name'] = mesh['name']
_mesh.update_cfg(cfg=mesh['meshParams'])
(_instance_count, _instance_label_count) = _mesh.create(instance_id_offset=_current_instance_label_count)
_current_instance_label_count += _instance_label_count
self._mesh_obj_list.append(_mesh)
self._mesh_list.append(_mesh.get_meshes())
except ImportError:
raise Exception('Cannot add mesh')
return 0 |
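The try block is a plain importlib plugin loader: each mesh type is expected to live in a module of the same name under src.assets.meshes. A stripped-down sketch; the package and type names below are illustrative, not real oaisys mesh types:
import importlib

def load_mesh_class(type_name, package='src.assets.meshes'):
    module = importlib.import_module(package + '.' + type_name)
    return getattr(module, type_name)   # class shares its module's name

# cls = load_mesh_class('MeshTerrain')  # hypothetical type from a config
# mesh = cls()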
def get_meshes(self):
' get all meshes\n Args:\n None\n Returns:\n list of meshes [list]\n '
return self._mesh_list | -2,214,239,682,271,738,400 | get all meshes
Args:
None
Returns:
list of meshes [list] | src/assets/handle/TSSMeshHandle.py | get_meshes | 5trobl/oaisys | python | def get_meshes(self):
' get all meshes\n Args:\n None\n Returns:\n list of meshes [list]\n '
return self._mesh_list |
def get_mesh_objs(self):
' get all mesh objects\n Args:\n None\n Returns:\n list of mesh objects [list]\n '
return self._mesh_obj_list | -4,785,707,416,306,741,000 | get all mesh objects
Args:
None
Returns:
list of mesh objects [list] | src/assets/handle/TSSMeshHandle.py | get_mesh_objs | 5trobl/oaisys | python | def get_mesh_objs(self):
' get all mesh objects\n Args:\n None\n Returns:\n list of mesh objects [list]\n '
return self._mesh_obj_list |
def locked_volume_operation(f):
'Lock decorator for volume operations.\n\n Takes a named lock prior to executing the operation. The lock is named with\n the operation executed and the id of the volume. This lock can then be used\n by other operations to avoid operation conflicts on shared volumes.\n\n Example use:\n\n If a volume operation uses this decorator, it will block until the named\n lock is free. This is used to protect concurrent operations on the same\n volume e.g. delete VolA while create volume VolB from VolA is in progress.\n '
def lvo_inner1(inst, context, volume_id, **kwargs):
@utils.synchronized(('%s-%s' % (volume_id, f.__name__)), external=True)
def lvo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lvo_inner2(inst, context, volume_id, **kwargs)
return lvo_inner1 | 4,139,636,561,719,643,000 | Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the volume. This lock can then be used
by other operations to avoid operation conflicts on shared volumes.
Example use:
If a volume operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
volume e.g. delete VolA while create volume VolB from VolA is in progress. | cinder/volume/manager.py | locked_volume_operation | ISCAS-VDI/cinder-base | python | def locked_volume_operation(f):
'Lock decorator for volume operations.\n\n Takes a named lock prior to executing the operation. The lock is named with\n the operation executed and the id of the volume. This lock can then be used\n by other operations to avoid operation conflicts on shared volumes.\n\n Example use:\n\n If a volume operation uses this decorator, it will block until the named\n lock is free. This is used to protect concurrent operations on the same\n volume e.g. delete VolA while create volume VolB from VolA is in progress.\n '
def lvo_inner1(inst, context, volume_id, **kwargs):
@utils.synchronized(('%s-%s' % (volume_id, f.__name__)), external=True)
def lvo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lvo_inner2(inst, context, volume_id, **kwargs)
return lvo_inner1 |
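A self-contained illustration of the per-volume named lock, using an in-process threading.Lock registry in place of Cinder's utils.synchronized (which also supports inter-process file locks):
import functools
import threading

_locks = {}
_registry_guard = threading.Lock()

def locked_by_volume(f):
    @functools.wraps(f)
    def wrapper(self, context, volume_id, **kwargs):
        name = '%s-%s' % (volume_id, f.__name__)
        with _registry_guard:
            lock = _locks.setdefault(name, threading.Lock())
        with lock:                        # serializes operations on this volume
            return f(self, context, volume_id, **kwargs)
    return wrapper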
def locked_detach_operation(f):
"Lock decorator for volume detach operations.\n\n Takes a named lock prior to executing the detach call. The lock is named\n with the operation executed and the id of the volume. This lock can then\n be used by other operations to avoid operation conflicts on shared volumes.\n\n This locking mechanism is only for detach calls. We can't use the\n locked_volume_operation, because detach requires an additional\n attachment_id in the parameter list.\n "
def ldo_inner1(inst, context, volume_id, attachment_id=None, **kwargs):
@utils.synchronized(('%s-%s' % (volume_id, f.__name__)), external=True)
def ldo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return ldo_inner2(inst, context, volume_id, attachment_id, **kwargs)
return ldo_inner1 | 3,312,106,775,469,400,000 | Lock decorator for volume detach operations.
Takes a named lock prior to executing the detach call. The lock is named
with the operation executed and the id of the volume. This lock can then
be used by other operations to avoid operation conflicts on shared volumes.
This locking mechanism is only for detach calls. We can't use the
locked_volume_operation, because detach requires an additional
attachment_id in the parameter list. | cinder/volume/manager.py | locked_detach_operation | ISCAS-VDI/cinder-base | python | def locked_detach_operation(f):
"Lock decorator for volume detach operations.\n\n Takes a named lock prior to executing the detach call. The lock is named\n with the operation executed and the id of the volume. This lock can then\n be used by other operations to avoid operation conflicts on shared volumes.\n\n This locking mechanism is only for detach calls. We can't use the\n locked_volume_operation, because detach requires an additional\n attachment_id in the parameter list.\n "
def ldo_inner1(inst, context, volume_id, attachment_id=None, **kwargs):
@utils.synchronized(('%s-%s' % (volume_id, f.__name__)), external=True)
def ldo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return ldo_inner2(inst, context, volume_id, attachment_id, **kwargs)
return ldo_inner1 |
def locked_snapshot_operation(f):
'Lock decorator for snapshot operations.\n\n Takes a named lock prior to executing the operation. The lock is named with\n the operation executed and the id of the snapshot. This lock can then be\n used by other operations to avoid operation conflicts on shared snapshots.\n\n Example use:\n\n If a snapshot operation uses this decorator, it will block until the named\n lock is free. This is used to protect concurrent operations on the same\n snapshot e.g. delete SnapA while create volume VolA from SnapA is in\n progress.\n '
def lso_inner1(inst, context, snapshot, **kwargs):
@utils.synchronized(('%s-%s' % (snapshot.id, f.__name__)), external=True)
def lso_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lso_inner2(inst, context, snapshot, **kwargs)
return lso_inner1 | 9,145,286,079,068,817,000 | Lock decorator for snapshot operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the snapshot. This lock can then be
used by other operations to avoid operation conflicts on shared snapshots.
Example use:
If a snapshot operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
snapshot e.g. delete SnapA while create volume VolA from SnapA is in
progress. | cinder/volume/manager.py | locked_snapshot_operation | ISCAS-VDI/cinder-base | python | def locked_snapshot_operation(f):
'Lock decorator for snapshot operations.\n\n Takes a named lock prior to executing the operation. The lock is named with\n the operation executed and the id of the snapshot. This lock can then be\n used by other operations to avoid operation conflicts on shared snapshots.\n\n Example use:\n\n If a snapshot operation uses this decorator, it will block until the named\n lock is free. This is used to protect concurrent operations on the same\n snapshot e.g. delete SnapA while create volume VolA from SnapA is in\n progress.\n '
def lso_inner1(inst, context, snapshot, **kwargs):
@utils.synchronized(('%s-%s' % (snapshot.id, f.__name__)), external=True)
def lso_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lso_inner2(inst, context, snapshot, **kwargs)
return lso_inner1 |
def __init__(self, volume_driver=None, service_name=None, *args, **kwargs):
'Load the driver from the one specified in args, or from flags.'
super(VolumeManager, self).__init__(*args, service_name='volume', **kwargs)
self.configuration = config.Configuration(volume_manager_opts, config_group=service_name)
self.stats = {}
if (not volume_driver):
volume_driver = self.configuration.volume_driver
if (volume_driver in MAPPING):
LOG.warning(_LW('Driver path %s is deprecated, update your configuration to the new path.'), volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(context.get_admin_context())
LOG.debug('Cinder Volume DB check: vol_db_empty=%s', vol_db_empty)
curr_active_backend_id = None
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(context.get_admin_context(), svc_host, 'cinder-volume')
except exception.ServiceNotFound:
LOG.info(_LI('Service not found for updating active_backend_id, assuming default for driver init.'))
else:
curr_active_backend_id = service.active_backend_id
if self.configuration.suppress_requests_ssl_warnings:
LOG.warning(_LW('Suppressing requests library SSL Warnings'))
requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecurePlatformWarning)
self.driver = importutils.import_object(volume_driver, configuration=self.configuration, db=self.db, host=self.host, is_vol_db_empty=vol_db_empty, active_backend_id=curr_active_backend_id)
self.message_api = message_api.API()
if (CONF.profiler.enabled and (profiler is not None)):
self.driver = profiler.trace_cls('driver')(self.driver)
try:
self.extra_capabilities = jsonutils.loads(self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Invalid JSON: %s'), self.driver.configuration.extra_capabilities)
if self.driver.configuration.safe_get('image_volume_cache_enabled'):
max_cache_size = self.driver.configuration.safe_get('image_volume_cache_max_size_gb')
max_cache_entries = self.driver.configuration.safe_get('image_volume_cache_max_count')
self.image_volume_cache = image_cache.ImageVolumeCache(self.db, cinder_volume.API(), max_cache_size, max_cache_entries)
LOG.info(_LI('Image-volume cache enabled for host %(host)s.'), {'host': self.host})
else:
LOG.info(_LI('Image-volume cache disabled for host %(host)s.'), {'host': self.host})
self.image_volume_cache = None | 2,959,714,953,866,413,000 | Load the driver from the one specified in args, or from flags. | cinder/volume/manager.py | __init__ | ISCAS-VDI/cinder-base | python | def __init__(self, volume_driver=None, service_name=None, *args, **kwargs):
super(VolumeManager, self).__init__(*args, service_name='volume', **kwargs)
self.configuration = config.Configuration(volume_manager_opts, config_group=service_name)
self.stats = {}
if (not volume_driver):
volume_driver = self.configuration.volume_driver
if (volume_driver in MAPPING):
LOG.warning(_LW('Driver path %s is deprecated, update your configuration to the new path.'), volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(context.get_admin_context())
LOG.debug('Cinder Volume DB check: vol_db_empty=%s', vol_db_empty)
curr_active_backend_id = None
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(context.get_admin_context(), svc_host, 'cinder-volume')
except exception.ServiceNotFound:
LOG.info(_LI('Service not found for updating active_backend_id, assuming default for driver init.'))
else:
curr_active_backend_id = service.active_backend_id
if self.configuration.suppress_requests_ssl_warnings:
LOG.warning(_LW('Suppressing requests library SSL Warnings'))
requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecurePlatformWarning)
self.driver = importutils.import_object(volume_driver, configuration=self.configuration, db=self.db, host=self.host, is_vol_db_empty=vol_db_empty, active_backend_id=curr_active_backend_id)
self.message_api = message_api.API()
if (CONF.profiler.enabled and (profiler is not None)):
self.driver = profiler.trace_cls('driver')(self.driver)
try:
self.extra_capabilities = jsonutils.loads(self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Invalid JSON: %s'), self.driver.configuration.extra_capabilities)
if self.driver.configuration.safe_get('image_volume_cache_enabled'):
max_cache_size = self.driver.configuration.safe_get('image_volume_cache_max_size_gb')
max_cache_entries = self.driver.configuration.safe_get('image_volume_cache_max_count')
self.image_volume_cache = image_cache.ImageVolumeCache(self.db, cinder_volume.API(), max_cache_size, max_cache_entries)
LOG.info(_LI('Image-volume cache enabled for host %(host)s.'), {'host': self.host})
else:
LOG.info(_LI('Image-volume cache disabled for host %(host)s.'), {'host': self.host})
self.image_volume_cache = None |
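The driver itself is resolved at runtime from a dotted path, with the MAPPING dict rewriting deprecated paths first. A sketch of that resolution using stdlib importlib in place of oslo's importutils; the paths are illustrative:
import importlib

MAPPING = {'cinder.volume.driver.OldDriver':
           'cinder.volume.drivers.new.NewDriver'}   # hypothetical entry

def import_object(dotted_path, **kwargs):
    dotted_path = MAPPING.get(dotted_path, dotted_path)
    module_name, cls_name = dotted_path.rsplit('.', 1)
    cls = getattr(importlib.import_module(module_name), cls_name)
    return cls(**kwargs)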
def _set_voldb_empty_at_startup_indicator(self, ctxt):
'Determine if the Cinder volume DB is empty.\n\n A check of the volume DB is done to determine whether it is empty or\n not at this point.\n\n :param ctxt: our working context\n '
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if (len(vol_entries) == 0):
LOG.info(_LI('Determined volume DB was empty at startup.'))
return True
else:
LOG.info(_LI('Determined volume DB was not empty at startup.'))
return False | -208,776,576,451,600,540 | Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context | cinder/volume/manager.py | _set_voldb_empty_at_startup_indicator | ISCAS-VDI/cinder-base | python | def _set_voldb_empty_at_startup_indicator(self, ctxt):
'Determine if the Cinder volume DB is empty.\n\n A check of the volume DB is done to determine whether it is empty or\n not at this point.\n\n :param ctxt: our working context\n '
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if (len(vol_entries) == 0):
LOG.info(_LI('Determined volume DB was empty at startup.'))
return True
else:
LOG.info(_LI('Determined volume DB was not empty at startup.'))
return False |
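The emptiness probe only asks for a single row. The same idea against an illustrative SQLite table, since the real call goes through Cinder's DB API:
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE volumes (id INTEGER PRIMARY KEY)')
row = conn.execute('SELECT 1 FROM volumes LIMIT 1').fetchone()
print('volume DB empty' if row is None else 'volume DB not empty')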