Dataset schema (one row per function):

| column | dtype | length range |
|---|---|---|
| repository_name | string | 7-55 |
| func_path_in_repository | string | 4-223 |
| func_name | string | 1-134 |
| whole_func_string | string | 75-104k |
| language | string | 1 class |
| func_code_string | string | 75-104k |
| func_code_tokens | sequence | 19-28.4k |
| func_documentation_string | string | 1-46.9k |
| func_documentation_tokens | sequence | 1-1.97k |
| split_name | string | 1 class |
| func_code_url | string | 87-315 |

| repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_string | func_code_tokens | func_documentation_string | func_documentation_tokens | split_name | func_code_url |
---|---|---|---|---|---|---|---|---|---|---|
eng-tools/sfsimodels | sfsimodels/models/soils.py | Soil.override | def override(self, item, value):
"""
Can set a parameter to a value that is inconsistent with existing values.
This method sets the inconsistent value and then reapplies all existing values
that are still consistent; all non-consistent (conflicting) values are removed from the object
and returned as a list
:param item: name of parameter to be set
:param value: value of the parameter to be set
:return: list, conflicting values
"""
if not hasattr(self, item):
raise KeyError("Soil Object does not have property: %s", item)
try:
setattr(self, item, value) # try to set using normal setter method
return []
except ModelError:
pass # if inconsistency, then need to rebuild stack
# create a new temporary stack
temp_stack = list(self.stack)
# remove item from original position in stack
temp_stack[:] = (value for value in temp_stack if value[0] != item)
# add item to the start of the stack
temp_stack.insert(0, (item, value))
# clear object, ready to rebuild
self.reset_all()
# reapply trace, one item at a time, if conflict then don't add the conflict.
conflicts = []
for item, value in temp_stack:
# catch all conflicts
try:
setattr(self, item, value)
except ModelError:
conflicts.append(item)
return conflicts | python | def override(self, item, value):
"""
Can set a parameter to a value that is inconsistent with existing values.
This method sets the inconsistent value and then reapplies all existing values
that are still consistent; all non-consistent (conflicting) values are removed from the object
and returned as a list
:param item: name of parameter to be set
:param value: value of the parameter to be set
:return: list, conflicting values
"""
if not hasattr(self, item):
raise KeyError("Soil Object does not have property: %s", item)
try:
setattr(self, item, value) # try to set using normal setter method
return []
except ModelError:
pass # if inconsistency, then need to rebuild stack
# create a new temporary stack
temp_stack = list(self.stack)
# remove item from original position in stack
temp_stack[:] = (value for value in temp_stack if value[0] != item)
# add item to the start of the stack
temp_stack.insert(0, (item, value))
# clear object, ready to rebuild
self.reset_all()
# reapply trace, one item at a time, if conflict then don't add the conflict.
conflicts = []
for item, value in temp_stack:
# catch all conflicts
try:
setattr(self, item, value)
except ModelError:
conflicts.append(item)
return conflicts | [
"def",
"override",
"(",
"self",
",",
"item",
",",
"value",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"item",
")",
":",
"raise",
"KeyError",
"(",
"\"Soil Object does not have property: %s\"",
",",
"item",
")",
"try",
":",
"setattr",
"(",
"self",
",",
"item",
",",
"value",
")",
"# try to set using normal setter method",
"return",
"[",
"]",
"except",
"ModelError",
":",
"pass",
"# if inconsistency, then need to rebuild stack",
"# create a new temporary stack",
"temp_stack",
"=",
"list",
"(",
"self",
".",
"stack",
")",
"# remove item from original position in stack",
"temp_stack",
"[",
":",
"]",
"=",
"(",
"value",
"for",
"value",
"in",
"temp_stack",
"if",
"value",
"[",
"0",
"]",
"!=",
"item",
")",
"# add item to the start of the stack",
"temp_stack",
".",
"insert",
"(",
"0",
",",
"(",
"item",
",",
"value",
")",
")",
"# clear object, ready to rebuild",
"self",
".",
"reset_all",
"(",
")",
"# reapply trace, one item at a time, if conflict then don't add the conflict.",
"conflicts",
"=",
"[",
"]",
"for",
"item",
",",
"value",
"in",
"temp_stack",
":",
"# catch all conflicts",
"try",
":",
"setattr",
"(",
"self",
",",
"item",
",",
"value",
")",
"except",
"ModelError",
":",
"conflicts",
".",
"append",
"(",
"item",
")",
"return",
"conflicts"
] | Can set a parameter to a value that is inconsistent with existing values.
This method sets the inconsistent value and then reapplies all existing values
that are still consistent; all non-consistent (conflicting) values are removed from the object
and returned as a list
:param item: name of parameter to be set
:param value: value of the parameter to be set
:return: list, conflicting values | [
"Can",
"set",
"a",
"parameter",
"to",
"a",
"value",
"that",
"is",
"inconsistent",
"with",
"existing",
"values",
"."
] | train | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L84-L119 |
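The pattern is set-then-rebuild: the forced value is moved to the front of the recorded parameter stack, the object is wiped with `reset_all`, and every stored pair is re-applied, with any pair that now raises a `ModelError` reported back rather than kept. A minimal usage sketch (assuming `sfsimodels` is installed; the exact conflict reported depends on the library's consistency rules, so the output is illustrative):

```python
from sfsimodels.models.soils import Soil

sl = Soil()
sl.unit_dry_weight = 17000.  # [N/m^3]
sl.specific_gravity = 2.65   # the void ratio is now implied by these two

# Setting e_curr directly would raise a ModelError; override() forces it
# and returns the names of parameters that could not be re-applied.
conflicts = sl.override("e_curr", 0.5)
print(conflicts)  # e.g. ['specific_gravity'] (illustrative output)
```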
eng-tools/sfsimodels | sfsimodels/models/soils.py | Soil.reset_all | def reset_all(self):
"""
Resets all parameters to None
"""
for item in self.inputs:
setattr(self, "_%s" % item, None)
self.stack = [] | python | def reset_all(self):
"""
Resets all parameters to None
"""
for item in self.inputs:
setattr(self, "_%s" % item, None)
self.stack = [] | [
"def",
"reset_all",
"(",
"self",
")",
":",
"for",
"item",
"in",
"self",
".",
"inputs",
":",
"setattr",
"(",
"self",
",",
"\"_%s\"",
"%",
"item",
",",
"None",
")",
"self",
".",
"stack",
"=",
"[",
"]"
] | Resets all parameters to None | [
"Resets",
"all",
"parameters",
"to",
"None"
] | train | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L121-L127 |
eng-tools/sfsimodels | sfsimodels/models/soils.py | Soil._add_to_stack | def _add_to_stack(self, item, value):
"""
Add a parameter-value pair to the stack of parameters that have been set.
:param item:
:param value:
:return:
"""
p_value = (item, value)
if p_value not in self.stack:
self.stack.append(p_value) | python | def _add_to_stack(self, item, value):
"""
Add a parameter-value pair to the stack of parameters that have been set.
:param item:
:param value:
:return:
"""
p_value = (item, value)
if p_value not in self.stack:
self.stack.append(p_value) | [
"def",
"_add_to_stack",
"(",
"self",
",",
"item",
",",
"value",
")",
":",
"p_value",
"=",
"(",
"item",
",",
"value",
")",
"if",
"p_value",
"not",
"in",
"self",
".",
"stack",
":",
"self",
".",
"stack",
".",
"append",
"(",
"p_value",
")"
] | Add a parameter-value pair to the stack of parameters that have been set.
:param item:
:param value:
:return: | [
"Add",
"a",
"parameter",
"-",
"value",
"pair",
"to",
"the",
"stack",
"of",
"parameters",
"that",
"have",
"been",
"set",
".",
":",
"param",
"item",
":",
":",
"param",
"value",
":",
":",
"return",
":"
] | train | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L129-L138 |
eng-tools/sfsimodels | sfsimodels/models/soils.py | Soil.get_shear_vel | def get_shear_vel(self, saturated):
"""
Calculate the shear wave velocity
:param saturated: bool, if true then use saturated mass
:return:
"""
try:
if saturated:
return np.sqrt(self.g_mod / self.unit_sat_mass)
else:
return np.sqrt(self.g_mod / self.unit_dry_mass)
except TypeError:
return None | python | def get_shear_vel(self, saturated):
"""
Calculate the shear wave velocity
:param saturated: bool, if true then use saturated mass
:return:
"""
try:
if saturated:
return np.sqrt(self.g_mod / self.unit_sat_mass)
else:
return np.sqrt(self.g_mod / self.unit_dry_mass)
except TypeError:
return None | [
"def",
"get_shear_vel",
"(",
"self",
",",
"saturated",
")",
":",
"try",
":",
"if",
"saturated",
":",
"return",
"np",
".",
"sqrt",
"(",
"self",
".",
"g_mod",
"/",
"self",
".",
"unit_sat_mass",
")",
"else",
":",
"return",
"np",
".",
"sqrt",
"(",
"self",
".",
"g_mod",
"/",
"self",
".",
"unit_dry_mass",
")",
"except",
"TypeError",
":",
"return",
"None"
] | Calculate the shear wave velocity
:param saturated: bool, if true then use saturated mass
:return: | [
"Calculate",
"the",
"shear",
"wave",
"velocity"
] | train | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L256-L269 |
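The body is the elastic relation v_s = sqrt(G / rho), with rho the dry or saturated unit mass, and the try/except returns None while either input is still unset. The same computation in isolation (illustrative numbers):

```python
import numpy as np

def shear_vel(g_mod, unit_mass):
    """v_s = sqrt(G / rho) for shear modulus G [Pa] and density rho [kg/m^3]."""
    return np.sqrt(g_mod / unit_mass)

print(shear_vel(40.0e6, 1800.0))  # ~149.1 m/s
```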
eng-tools/sfsimodels | sfsimodels/models/soils.py | Soil.saturation | def saturation(self, value):
"""Volume of water to volume of voids"""
value = clean_float(value)
if value is None:
return
try:
unit_moisture_weight = self.unit_moist_weight - self.unit_dry_weight
unit_moisture_volume = unit_moisture_weight / self._pw
saturation = unit_moisture_volume / self._calc_unit_void_volume()
if saturation is not None and not ct.isclose(saturation, value, rel_tol=self._tolerance):
raise ModelError("New saturation (%.3f) is inconsistent "
"with calculated value (%.3f)" % (value, saturation))
except TypeError:
pass
old_value = self.saturation
self._saturation = value
try:
self.recompute_all_weights_and_void()
self._add_to_stack("saturation", value)
except ModelError as e:
self._saturation = old_value
raise ModelError(e) | python | def saturation(self, value):
"""Volume of water to volume of voids"""
value = clean_float(value)
if value is None:
return
try:
unit_moisture_weight = self.unit_moist_weight - self.unit_dry_weight
unit_moisture_volume = unit_moisture_weight / self._pw
saturation = unit_moisture_volume / self._calc_unit_void_volume()
if saturation is not None and not ct.isclose(saturation, value, rel_tol=self._tolerance):
raise ModelError("New saturation (%.3f) is inconsistent "
"with calculated value (%.3f)" % (value, saturation))
except TypeError:
pass
old_value = self.saturation
self._saturation = value
try:
self.recompute_all_weights_and_void()
self._add_to_stack("saturation", value)
except ModelError as e:
self._saturation = old_value
raise ModelError(e) | [
"def",
"saturation",
"(",
"self",
",",
"value",
")",
":",
"value",
"=",
"clean_float",
"(",
"value",
")",
"if",
"value",
"is",
"None",
":",
"return",
"try",
":",
"unit_moisture_weight",
"=",
"self",
".",
"unit_moist_weight",
"-",
"self",
".",
"unit_dry_weight",
"unit_moisture_volume",
"=",
"unit_moisture_weight",
"/",
"self",
".",
"_pw",
"saturation",
"=",
"unit_moisture_volume",
"/",
"self",
".",
"_calc_unit_void_volume",
"(",
")",
"if",
"saturation",
"is",
"not",
"None",
"and",
"not",
"ct",
".",
"isclose",
"(",
"saturation",
",",
"value",
",",
"rel_tol",
"=",
"self",
".",
"_tolerance",
")",
":",
"raise",
"ModelError",
"(",
"\"New saturation (%.3f) is inconsistent \"",
"\"with calculated value (%.3f)\"",
"%",
"(",
"value",
",",
"saturation",
")",
")",
"except",
"TypeError",
":",
"pass",
"old_value",
"=",
"self",
".",
"saturation",
"self",
".",
"_saturation",
"=",
"value",
"try",
":",
"self",
".",
"recompute_all_weights_and_void",
"(",
")",
"self",
".",
"_add_to_stack",
"(",
"\"saturation\"",
",",
"value",
")",
"except",
"ModelError",
"as",
"e",
":",
"self",
".",
"_saturation",
"=",
"old_value",
"raise",
"ModelError",
"(",
"e",
")"
] | Volume of water to volume of voids | [
"Volume",
"of",
"water",
"to",
"volume",
"of",
"voids"
] | train | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L416-L437 |
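The consistency check takes the weight of water per unit volume (moist minus dry unit weight), converts it to a volume via the unit weight of water `_pw`, and divides by the unit void volume. The arithmetic on its own (a sketch; the 9800 N/m^3 water unit weight and the soil values are illustrative):

```python
pw = 9800.0                  # unit weight of water [N/m^3] (assumed value)
unit_dry_weight = 15000.0    # [N/m^3]
unit_moist_weight = 18000.0  # [N/m^3]
unit_void_volume = 0.40      # void volume per unit soil volume, e / (1 + e)

unit_moisture_weight = unit_moist_weight - unit_dry_weight  # weight of water
unit_moisture_volume = unit_moisture_weight / pw            # volume of water
saturation = unit_moisture_volume / unit_void_volume        # S = V_w / V_v
print(round(saturation, 3))  # 0.765
```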
eng-tools/sfsimodels | sfsimodels/models/soils.py | Soil.specific_gravity | def specific_gravity(self, value):
""" Set the relative weight of the solid """
value = clean_float(value)
if value is None:
return
specific_gravity = self._calc_specific_gravity()
if specific_gravity is not None and not ct.isclose(specific_gravity, value, rel_tol=self._tolerance):
raise ModelError("specific gravity is inconsistent with set unit_dry_weight and void_ratio")
self._specific_gravity = float(value)
self.stack.append(("specific_gravity", float(value)))
self.recompute_all_weights_and_void() | python | def specific_gravity(self, value):
""" Set the relative weight of the solid """
value = clean_float(value)
if value is None:
return
specific_gravity = self._calc_specific_gravity()
if specific_gravity is not None and not ct.isclose(specific_gravity, value, rel_tol=self._tolerance):
raise ModelError("specific gravity is inconsistent with set unit_dry_weight and void_ratio")
self._specific_gravity = float(value)
self.stack.append(("specific_gravity", float(value)))
self.recompute_all_weights_and_void() | [
"def",
"specific_gravity",
"(",
"self",
",",
"value",
")",
":",
"value",
"=",
"clean_float",
"(",
"value",
")",
"if",
"value",
"is",
"None",
":",
"return",
"specific_gravity",
"=",
"self",
".",
"_calc_specific_gravity",
"(",
")",
"if",
"specific_gravity",
"is",
"not",
"None",
"and",
"not",
"ct",
".",
"isclose",
"(",
"specific_gravity",
",",
"value",
",",
"rel_tol",
"=",
"self",
".",
"_tolerance",
")",
":",
"raise",
"ModelError",
"(",
"\"specific gravity is inconsistent with set unit_dry_weight and void_ratio\"",
")",
"self",
".",
"_specific_gravity",
"=",
"float",
"(",
"value",
")",
"self",
".",
"stack",
".",
"append",
"(",
"(",
"\"specific_gravity\"",
",",
"float",
"(",
"value",
")",
")",
")",
"self",
".",
"recompute_all_weights_and_void",
"(",
")"
] | Set the relative weight of the solid | [
"Set",
"the",
"relative",
"weight",
"of",
"the",
"solid"
] | train | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L460-L471 |
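`_calc_specific_gravity` is not shown here, but the standard phase relation that ties the three quantities in the error message together is G_s = gamma_dry * (1 + e) / gamma_w. As a stand-alone sketch:

```python
gamma_dry = 16000.0  # dry unit weight [N/m^3]
e = 0.6              # void ratio
gamma_w = 9800.0     # unit weight of water [N/m^3] (assumed value)

g_s = gamma_dry * (1 + e) / gamma_w
print(round(g_s, 2))  # 2.61
```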
eng-tools/sfsimodels | sfsimodels/models/soils.py | SoilProfile.add_layer | def add_layer(self, depth, soil):
"""
Adds a soil to the SoilProfile at a set depth.
Note, the soils are automatically reordered based on depth from surface.
:param depth: depth from surface to top of soil layer
:param soil: Soil object
"""
self._layers[depth] = soil
self._sort_layers()
if self.hydrostatic:
if depth >= self.gwl:
soil.saturation = 1.0
else:
li = self.get_layer_index_by_depth(depth)
layer_height = self.layer_height(li)
if layer_height is None:
soil.saturation = 0.0
elif depth + layer_height <= self.gwl:
soil.saturation = 0.0
else:
sat_height = depth + self.layer_height(li) - self.gwl
soil.saturation = sat_height / self.layer_height(li) | python | def add_layer(self, depth, soil):
"""
Adds a soil to the SoilProfile at a set depth.
Note, the soils are automatically reordered based on depth from surface.
:param depth: depth from surface to top of soil layer
:param soil: Soil object
"""
self._layers[depth] = soil
self._sort_layers()
if self.hydrostatic:
if depth >= self.gwl:
soil.saturation = 1.0
else:
li = self.get_layer_index_by_depth(depth)
layer_height = self.layer_height(li)
if layer_height is None:
soil.saturation = 0.0
elif depth + layer_height <= self.gwl:
soil.saturation = 0.0
else:
sat_height = depth + self.layer_height(li) - self.gwl
soil.saturation = sat_height / self.layer_height(li) | [
"def",
"add_layer",
"(",
"self",
",",
"depth",
",",
"soil",
")",
":",
"self",
".",
"_layers",
"[",
"depth",
"]",
"=",
"soil",
"self",
".",
"_sort_layers",
"(",
")",
"if",
"self",
".",
"hydrostatic",
":",
"if",
"depth",
">=",
"self",
".",
"gwl",
":",
"soil",
".",
"saturation",
"=",
"1.0",
"else",
":",
"li",
"=",
"self",
".",
"get_layer_index_by_depth",
"(",
"depth",
")",
"layer_height",
"=",
"self",
".",
"layer_height",
"(",
"li",
")",
"if",
"layer_height",
"is",
"None",
":",
"soil",
".",
"saturation",
"=",
"0.0",
"elif",
"depth",
"+",
"layer_height",
"<=",
"self",
".",
"gwl",
":",
"soil",
".",
"saturation",
"=",
"0.0",
"else",
":",
"sat_height",
"=",
"depth",
"+",
"self",
".",
"layer_height",
"(",
"li",
")",
"-",
"self",
".",
"gwl",
"soil",
".",
"saturation",
"=",
"sat_height",
"/",
"self",
".",
"layer_height",
"(",
"li",
")"
] | Adds a soil to the SoilProfile at a set depth.
Note, the soils are automatically reordered based on depth from surface.
:param depth: depth from surface to top of soil layer
:param soil: Soil object | [
"Adds",
"a",
"soil",
"to",
"the",
"SoilProfile",
"at",
"a",
"set",
"depth",
"."
] | train | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L910-L934 |
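Saturation is assigned at the moment a layer is added, based on where it sits relative to the ground water level given the layers present at that time. A usage sketch (assuming `sfsimodels` exposes `Soil` and `SoilProfile` at the top level and that `hydrostatic` is a settable flag):

```python
import sfsimodels as sm  # assumed import path

sl_sand = sm.Soil()
sl_crust = sm.Soil()

sp = sm.SoilProfile()
sp.height = 10.0       # total profile height [m]
sp.gwl = 3.0           # ground water level, depth from the surface [m]
sp.hydrostatic = True  # let add_layer assign saturations

sp.add_layer(5.0, sl_sand)   # 5-10 m: top is below the gwl -> fully saturated
sp.add_layer(0.0, sl_crust)  # 0-5 m: crosses the gwl at 3 m
print(sl_sand.saturation)    # 1.0
print(sl_crust.saturation)   # 0.4, since (0 + 5 - 3) / 5 of the layer is wet
```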
eng-tools/sfsimodels | sfsimodels/models/soils.py | SoilProfile._sort_layers | def _sort_layers(self):
"""Sort the layers by depth."""
self._layers = OrderedDict(sorted(self._layers.items(), key=lambda t: t[0])) | python | def _sort_layers(self):
"""Sort the layers by depth."""
self._layers = OrderedDict(sorted(self._layers.items(), key=lambda t: t[0])) | [
"def",
"_sort_layers",
"(",
"self",
")",
":",
"self",
".",
"_layers",
"=",
"OrderedDict",
"(",
"sorted",
"(",
"self",
".",
"_layers",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"t",
":",
"t",
"[",
"0",
"]",
")",
")"
] | Sort the layers by depth. | [
"Sort",
"the",
"layers",
"by",
"depth",
"."
] | train | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L936-L938 |
eng-tools/sfsimodels | sfsimodels/models/soils.py | SoilProfile.layer_height | def layer_height(self, layer_int):
"""
Get the layer height by layer id number.
:param layer_int:
:return: float, height of the soil layer
"""
if layer_int == self.n_layers:
if self.height is None:
return None
return self.height - self.layer_depth(layer_int)
else:
return self.layer_depth(layer_int + 1) - self.layer_depth(layer_int) | python | def layer_height(self, layer_int):
"""
Get the layer height by layer id number.
:param layer_int:
:return: float, height of the soil layer
"""
if layer_int == self.n_layers:
if self.height is None:
return None
return self.height - self.layer_depth(layer_int)
else:
return self.layer_depth(layer_int + 1) - self.layer_depth(layer_int) | [
"def",
"layer_height",
"(",
"self",
",",
"layer_int",
")",
":",
"if",
"layer_int",
"==",
"self",
".",
"n_layers",
":",
"if",
"self",
".",
"height",
"is",
"None",
":",
"return",
"None",
"return",
"self",
".",
"height",
"-",
"self",
".",
"layer_depth",
"(",
"layer_int",
")",
"else",
":",
"return",
"self",
".",
"layer_depth",
"(",
"layer_int",
"+",
"1",
")",
"-",
"self",
".",
"layer_depth",
"(",
"layer_int",
")"
] | Get the layer height by layer id number.
:param layer_int:
:return: float, height of the soil layer | [
"Get",
"the",
"layer",
"height",
"by",
"layer",
"id",
"number",
"."
] | train | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L984-L996 |
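Layer ids are 1-based, and the height of the bottom layer needs the total profile height, hence the `None` guard. Continuing the profile sketch above:

```python
print(sp.layer_height(1))  # 5.0, depth of layer 2 minus depth of layer 1
print(sp.layer_height(2))  # 5.0, profile height 10.0 minus depth 5.0
```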
eng-tools/sfsimodels | sfsimodels/models/soils.py | SoilProfile.equivalent_crust_cohesion | def equivalent_crust_cohesion(self):
"""
Calculate the equivalent crust cohesion strength according to Karamitros et al. 2013 sett, pg 8 eq. 14
:return: equivalent cohesion [Pa]
"""
deprecation("Will be moved to a function")
if len(self.layers) > 1:
crust = self.layer(0)
crust_phi_r = np.radians(crust.phi)
equivalent_cohesion = crust.cohesion + crust.k_0 * self.crust_effective_unit_weight * \
self.layer_depth(1) / 2 * np.tan(crust_phi_r)
return equivalent_cohesion | python | def equivalent_crust_cohesion(self):
"""
Calculate the equivalent crust cohesion strength according to Karamitros et al. 2013 sett, pg 8 eq. 14
:return: equivalent cohesion [Pa]
"""
deprecation("Will be moved to a function")
if len(self.layers) > 1:
crust = self.layer(0)
crust_phi_r = np.radians(crust.phi)
equivalent_cohesion = crust.cohesion + crust.k_0 * self.crust_effective_unit_weight * \
self.layer_depth(1) / 2 * np.tan(crust_phi_r)
return equivalent_cohesion | [
"def",
"equivalent_crust_cohesion",
"(",
"self",
")",
":",
"deprecation",
"(",
"\"Will be moved to a function\"",
")",
"if",
"len",
"(",
"self",
".",
"layers",
")",
">",
"1",
":",
"crust",
"=",
"self",
".",
"layer",
"(",
"0",
")",
"crust_phi_r",
"=",
"np",
".",
"radians",
"(",
"crust",
".",
"phi",
")",
"equivalent_cohesion",
"=",
"crust",
".",
"cohesion",
"+",
"crust",
".",
"k_0",
"*",
"self",
".",
"crust_effective_unit_weight",
"*",
"self",
".",
"layer_depth",
"(",
"1",
")",
"/",
"2",
"*",
"np",
".",
"tan",
"(",
"crust_phi_r",
")",
"return",
"equivalent_cohesion"
] | Calculate the equivalent crust cohesion strength according to Karamitros et al. 2013 sett, pg 8 eq. 14
:return: equivalent cohesion [Pa] | [
"Calculate",
"the",
"equivalent",
"crust",
"cohesion",
"strength",
"according",
"to",
"Karamitros",
"et",
"al",
".",
"2013",
"sett",
"pg",
"8",
"eq",
".",
"14"
] | train | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L1083-L1095 |
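The property evaluates c_eq = c + K_0 * gamma' * (D_1 / 2) * tan(phi), where D_1 is the depth to the second layer (eq. 14 of the cited Karamitros et al. 2013 paper). The same arithmetic stand-alone (illustrative inputs):

```python
import numpy as np

cohesion = 20.0e3          # crust cohesion [Pa]
k_0 = 0.5                  # at-rest earth pressure coefficient
eff_unit_weight = 18000.0  # crust effective unit weight [N/m^3]
d_1 = 2.0                  # depth to the top of layer 2 [m]
phi = 30.0                 # crust friction angle [degrees]

c_eq = cohesion + k_0 * eff_unit_weight * d_1 / 2 * np.tan(np.radians(phi))
print(round(c_eq))  # ~25196 Pa
```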
eng-tools/sfsimodels | sfsimodels/models/soils.py | SoilProfile.get_v_total_stress_at_depth | def get_v_total_stress_at_depth(self, z):
"""
Determine the vertical total stress at depth z, where z can be a number or an array of numbers.
"""
if not hasattr(z, "__len__"):
return self.one_vertical_total_stress(z)
else:
sigma_v_effs = []
for value in z:
sigma_v_effs.append(self.one_vertical_total_stress(value))
return np.array(sigma_v_effs) | python | def get_v_total_stress_at_depth(self, z):
"""
Determine the vertical total stress at depth z, where z can be a number or an array of numbers.
"""
if not hasattr(z, "__len__"):
return self.one_vertical_total_stress(z)
else:
sigma_v_effs = []
for value in z:
sigma_v_effs.append(self.one_vertical_total_stress(value))
return np.array(sigma_v_effs) | [
"def",
"get_v_total_stress_at_depth",
"(",
"self",
",",
"z",
")",
":",
"if",
"not",
"hasattr",
"(",
"z",
",",
"\"__len__\"",
")",
":",
"return",
"self",
".",
"one_vertical_total_stress",
"(",
"z",
")",
"else",
":",
"sigma_v_effs",
"=",
"[",
"]",
"for",
"value",
"in",
"z",
":",
"sigma_v_effs",
".",
"append",
"(",
"self",
".",
"one_vertical_total_stress",
"(",
"value",
")",
")",
"return",
"np",
".",
"array",
"(",
"sigma_v_effs",
")"
] | Determine the vertical total stress at depth z, where z can be a number or an array of numbers. | [
"Determine",
"the",
"vertical",
"total",
"stress",
"at",
"depth",
"z",
"where",
"z",
"can",
"be",
"a",
"number",
"or",
"an",
"array",
"of",
"numbers",
"."
] | train | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L1112-L1123 |
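This is a thin scalar-or-array dispatcher over `one_vertical_total_stress` (despite the local name `sigma_v_effs`, the values are total stresses). Continuing the profile sketch, and assuming dry and saturated unit weights have been set on each layer:

```python
import numpy as np

print(sp.get_v_total_stress_at_depth(2.0))                   # single float [Pa]
print(sp.get_v_total_stress_at_depth(np.array([2.0, 6.0])))  # array of stresses
```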
eng-tools/sfsimodels | sfsimodels/models/soils.py | SoilProfile.one_vertical_total_stress | def one_vertical_total_stress(self, z_c):
"""
Determine the vertical total stress at a single depth z_c.
:param z_c: depth from surface
"""
total_stress = 0.0
depths = self.depths
end = 0
for layer_int in range(1, len(depths) + 1):
l_index = layer_int - 1
if z_c > depths[layer_int - 1]:
if l_index < len(depths) - 1 and z_c > depths[l_index + 1]:
height = depths[l_index + 1] - depths[l_index]
bottom_depth = depths[l_index + 1]
else:
end = 1
height = z_c - depths[l_index]
bottom_depth = z_c
if bottom_depth <= self.gwl:
total_stress += height * self.layer(layer_int).unit_dry_weight
else:
if self.layer(layer_int).unit_sat_weight is None:
raise AnalysisError("Saturated unit weight not defined for layer %i." % layer_int)
sat_height = bottom_depth - max(self.gwl, depths[l_index])
dry_height = height - sat_height
total_stress += dry_height * self.layer(layer_int).unit_dry_weight + \
sat_height * self.layer(layer_int).unit_sat_weight
else:
end = 1
if end:
break
return total_stress | python | def one_vertical_total_stress(self, z_c):
"""
Determine the vertical total stress at a single depth z_c.
:param z_c: depth from surface
"""
total_stress = 0.0
depths = self.depths
end = 0
for layer_int in range(1, len(depths) + 1):
l_index = layer_int - 1
if z_c > depths[layer_int - 1]:
if l_index < len(depths) - 1 and z_c > depths[l_index + 1]:
height = depths[l_index + 1] - depths[l_index]
bottom_depth = depths[l_index + 1]
else:
end = 1
height = z_c - depths[l_index]
bottom_depth = z_c
if bottom_depth <= self.gwl:
total_stress += height * self.layer(layer_int).unit_dry_weight
else:
if self.layer(layer_int).unit_sat_weight is None:
raise AnalysisError("Saturated unit weight not defined for layer %i." % layer_int)
sat_height = bottom_depth - max(self.gwl, depths[l_index])
dry_height = height - sat_height
total_stress += dry_height * self.layer(layer_int).unit_dry_weight + \
sat_height * self.layer(layer_int).unit_sat_weight
else:
end = 1
if end:
break
return total_stress | [
"def",
"one_vertical_total_stress",
"(",
"self",
",",
"z_c",
")",
":",
"total_stress",
"=",
"0.0",
"depths",
"=",
"self",
".",
"depths",
"end",
"=",
"0",
"for",
"layer_int",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"depths",
")",
"+",
"1",
")",
":",
"l_index",
"=",
"layer_int",
"-",
"1",
"if",
"z_c",
">",
"depths",
"[",
"layer_int",
"-",
"1",
"]",
":",
"if",
"l_index",
"<",
"len",
"(",
"depths",
")",
"-",
"1",
"and",
"z_c",
">",
"depths",
"[",
"l_index",
"+",
"1",
"]",
":",
"height",
"=",
"depths",
"[",
"l_index",
"+",
"1",
"]",
"-",
"depths",
"[",
"l_index",
"]",
"bottom_depth",
"=",
"depths",
"[",
"l_index",
"+",
"1",
"]",
"else",
":",
"end",
"=",
"1",
"height",
"=",
"z_c",
"-",
"depths",
"[",
"l_index",
"]",
"bottom_depth",
"=",
"z_c",
"if",
"bottom_depth",
"<=",
"self",
".",
"gwl",
":",
"total_stress",
"+=",
"height",
"*",
"self",
".",
"layer",
"(",
"layer_int",
")",
".",
"unit_dry_weight",
"else",
":",
"if",
"self",
".",
"layer",
"(",
"layer_int",
")",
".",
"unit_sat_weight",
"is",
"None",
":",
"raise",
"AnalysisError",
"(",
"\"Saturated unit weight not defined for layer %i.\"",
"%",
"layer_int",
")",
"sat_height",
"=",
"bottom_depth",
"-",
"max",
"(",
"self",
".",
"gwl",
",",
"depths",
"[",
"l_index",
"]",
")",
"dry_height",
"=",
"height",
"-",
"sat_height",
"total_stress",
"+=",
"dry_height",
"*",
"self",
".",
"layer",
"(",
"layer_int",
")",
".",
"unit_dry_weight",
"+",
"sat_height",
"*",
"self",
".",
"layer",
"(",
"layer_int",
")",
".",
"unit_sat_weight",
"else",
":",
"end",
"=",
"1",
"if",
"end",
":",
"break",
"return",
"total_stress"
] | Determine the vertical total stress at a single depth z_c.
:param z_c: depth from surface | [
"Determine",
"the",
"vertical",
"total",
"stress",
"at",
"a",
"single",
"depth",
"z_c",
"."
] | train | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L1125-L1158 |
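The loop accumulates gamma_dry * h above the water table and gamma_sat * h below it, layer by layer, stopping once the layer containing z_c has been handled. The same integration written independently (a sketch with two layers and the water table at 3 m):

```python
gwl = 3.0
layers = [(0.0, 16000.0, 19000.0),  # (top depth [m], gamma_dry, gamma_sat)
          (5.0, 17000.0, 20000.0)]

def total_stress(z):
    sigma = 0.0
    for i, (top, g_dry, g_sat) in enumerate(layers):
        bottom = layers[i + 1][0] if i + 1 < len(layers) else float("inf")
        bottom = min(bottom, z)
        if bottom <= top:
            break
        dry = max(0.0, min(bottom, gwl) - top)  # thickness above the gwl
        sat = (bottom - top) - dry              # thickness below the gwl
        sigma += dry * g_dry + sat * g_sat
    return sigma

print(total_stress(6.0))  # 3*16000 + 2*19000 + 1*20000 = 106000 Pa
```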
eng-tools/sfsimodels | sfsimodels/models/soils.py | SoilProfile.get_v_eff_stress_at_depth | def get_v_eff_stress_at_depth(self, y_c):
"""
Determine the vertical effective stress at a single depth y_c.
:param y_c: float, depth from surface
"""
sigma_v_c = self.get_v_total_stress_at_depth(y_c)
pp = self.get_hydrostatic_pressure_at_depth(y_c)
sigma_veff_c = sigma_v_c - pp
return sigma_veff_c | python | def get_v_eff_stress_at_depth(self, y_c):
"""
Determine the vertical effective stress at a single depth y_c.
:param y_c: float, depth from surface
"""
sigma_v_c = self.get_v_total_stress_at_depth(y_c)
pp = self.get_hydrostatic_pressure_at_depth(y_c)
sigma_veff_c = sigma_v_c - pp
return sigma_veff_c | [
"def",
"get_v_eff_stress_at_depth",
"(",
"self",
",",
"y_c",
")",
":",
"sigma_v_c",
"=",
"self",
".",
"get_v_total_stress_at_depth",
"(",
"y_c",
")",
"pp",
"=",
"self",
".",
"get_hydrostatic_pressure_at_depth",
"(",
"y_c",
")",
"sigma_veff_c",
"=",
"sigma_v_c",
"-",
"pp",
"return",
"sigma_veff_c"
] | Determine the vertical effective stress at a single depth y_c.
:param y_c: float, depth from surface | [
"Determine",
"the",
"vertical",
"effective",
"stress",
"at",
"a",
"single",
"depth",
"z_c",
"."
] | train | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L1181-L1190 |
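Effective stress is total stress minus the hydrostatic pore pressure, u = gamma_w * (z - gwl) below the water table. Using the numbers from the sketch above:

```python
sigma_v = 106000.0                    # total vertical stress at z = 6 m [Pa]
pore_pressure = (6.0 - 3.0) * 9800.0  # gamma_w times depth below the gwl
print(sigma_v - pore_pressure)        # 76600.0 Pa effective stress
```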
eng-tools/sfsimodels | sfsimodels/models/soils.py | SoilProfile.shear_vel_at_depth | def shear_vel_at_depth(self, y_c):
"""
Get the shear wave velocity at a depth.
:param y_c: float, depth from surface
:return:
"""
sl = self.get_soil_at_depth(y_c)
if y_c <= self.gwl:
saturation = False
else:
saturation = True
if hasattr(sl, "get_shear_vel_at_v_eff_stress"):
v_eff = self.get_v_eff_stress_at_depth(y_c)
vs = sl.get_shear_vel_at_v_eff_stress(v_eff, saturation)
else:
vs = sl.get_shear_vel(saturation)
return vs | python | def shear_vel_at_depth(self, y_c):
"""
Get the shear wave velocity at a depth.
:param y_c: float, depth from surface
:return:
"""
sl = self.get_soil_at_depth(y_c)
if y_c <= self.gwl:
saturation = False
else:
saturation = True
if hasattr(sl, "get_shear_vel_at_v_eff_stress"):
v_eff = self.get_v_eff_stress_at_depth(y_c)
vs = sl.get_shear_vel_at_v_eff_stress(v_eff, saturation)
else:
vs = sl.get_shear_vel(saturation)
return vs | [
"def",
"shear_vel_at_depth",
"(",
"self",
",",
"y_c",
")",
":",
"sl",
"=",
"self",
".",
"get_soil_at_depth",
"(",
"y_c",
")",
"if",
"y_c",
"<=",
"self",
".",
"gwl",
":",
"saturation",
"=",
"False",
"else",
":",
"saturation",
"=",
"True",
"if",
"hasattr",
"(",
"sl",
",",
"\"get_shear_vel_at_v_eff_stress\"",
")",
":",
"v_eff",
"=",
"self",
".",
"get_v_eff_stress_at_depth",
"(",
"y_c",
")",
"vs",
"=",
"sl",
".",
"get_shear_vel_at_v_eff_stress",
"(",
"v_eff",
",",
"saturation",
")",
"else",
":",
"vs",
"=",
"sl",
".",
"get_shear_vel",
"(",
"saturation",
")",
"return",
"vs"
] | Get the shear wave velocity at a depth.
:param y_c: float, depth from surface
:return: | [
"Get",
"the",
"shear",
"wave",
"velocity",
"at",
"a",
"depth",
"."
] | train | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L1197-L1214 |
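The `hasattr` check lets stress-dependent soil models supply a velocity from the current effective stress, while plain `Soil` objects fall back to the constant-modulus `get_shear_vel`; note that a depth exactly at the water table counts as dry. A usage sketch, continuing the profile example (the consistency rules may require more parameters than shown):

```python
sl_crust.g_mod = 40.0e6             # shear modulus [Pa]
sl_crust.unit_dry_weight = 17000.0  # the mass density is derived from this
print(sp.shear_vel_at_depth(2.0))   # uses the dry mass, since 2.0 m <= gwl
```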
sci-bots/svg-model | docs/generate_modules.py | create_package_file | def create_package_file(root, master_package, subroot, py_files, opts, subs):
"""Build the text of the file and write the file."""
package = os.path.split(root)[-1]
text = format_heading(1, '%s Package' % package)
# add each package's module
for py_file in py_files:
if shall_skip(os.path.join(root, py_file)):
continue
is_package = py_file == INIT
py_file = os.path.splitext(py_file)[0]
py_path = makename(subroot, py_file)
if is_package:
heading = ':mod:`%s` Package' % package
else:
heading = ':mod:`%s` Module' % py_file
text += format_heading(2, heading)
text += format_directive(is_package and subroot or py_path, master_package)
text += '\n'
# build a list of directories that are packages (they contain an INIT file)
subs = [sub for sub in subs if os.path.isfile(os.path.join(root, sub, INIT))]
# if there are some package directories, add a TOC for these subpackages
if subs:
text += format_heading(2, 'Subpackages')
text += '.. toctree::\n\n'
for sub in subs:
text += ' %s.%s\n' % (makename(master_package, subroot), sub)
text += '\n'
write_file(makename(master_package, subroot), text, opts) | python | def create_package_file(root, master_package, subroot, py_files, opts, subs):
"""Build the text of the file and write the file."""
package = os.path.split(root)[-1]
text = format_heading(1, '%s Package' % package)
# add each package's module
for py_file in py_files:
if shall_skip(os.path.join(root, py_file)):
continue
is_package = py_file == INIT
py_file = os.path.splitext(py_file)[0]
py_path = makename(subroot, py_file)
if is_package:
heading = ':mod:`%s` Package' % package
else:
heading = ':mod:`%s` Module' % py_file
text += format_heading(2, heading)
text += format_directive(is_package and subroot or py_path, master_package)
text += '\n'
# build a list of directories that are packages (they contain an INIT file)
subs = [sub for sub in subs if os.path.isfile(os.path.join(root, sub, INIT))]
# if there are some package directories, add a TOC for these subpackages
if subs:
text += format_heading(2, 'Subpackages')
text += '.. toctree::\n\n'
for sub in subs:
text += ' %s.%s\n' % (makename(master_package, subroot), sub)
text += '\n'
write_file(makename(master_package, subroot), text, opts) | [
"def",
"create_package_file",
"(",
"root",
",",
"master_package",
",",
"subroot",
",",
"py_files",
",",
"opts",
",",
"subs",
")",
":",
"package",
"=",
"os",
".",
"path",
".",
"split",
"(",
"root",
")",
"[",
"-",
"1",
"]",
"text",
"=",
"format_heading",
"(",
"1",
",",
"'%s Package'",
"%",
"package",
")",
"# add each package's module",
"for",
"py_file",
"in",
"py_files",
":",
"if",
"shall_skip",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"py_file",
")",
")",
":",
"continue",
"is_package",
"=",
"py_file",
"==",
"INIT",
"py_file",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"py_file",
")",
"[",
"0",
"]",
"py_path",
"=",
"makename",
"(",
"subroot",
",",
"py_file",
")",
"if",
"is_package",
":",
"heading",
"=",
"':mod:`%s` Package'",
"%",
"package",
"else",
":",
"heading",
"=",
"':mod:`%s` Module'",
"%",
"py_file",
"text",
"+=",
"format_heading",
"(",
"2",
",",
"heading",
")",
"text",
"+=",
"format_directive",
"(",
"is_package",
"and",
"subroot",
"or",
"py_path",
",",
"master_package",
")",
"text",
"+=",
"'\\n'",
"# build a list of directories that are packages (they contain an INIT file)",
"subs",
"=",
"[",
"sub",
"for",
"sub",
"in",
"subs",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"sub",
",",
"INIT",
")",
")",
"]",
"# if there are some package directories, add a TOC for theses subpackages",
"if",
"subs",
":",
"text",
"+=",
"format_heading",
"(",
"2",
",",
"'Subpackages'",
")",
"text",
"+=",
"'.. toctree::\\n\\n'",
"for",
"sub",
"in",
"subs",
":",
"text",
"+=",
"' %s.%s\\n'",
"%",
"(",
"makename",
"(",
"master_package",
",",
"subroot",
")",
",",
"sub",
")",
"text",
"+=",
"'\\n'",
"write_file",
"(",
"makename",
"(",
"master_package",
",",
"subroot",
")",
",",
"text",
",",
"opts",
")"
] | Build the text of the file and write the file. | [
"Build",
"the",
"text",
"of",
"the",
"file",
"and",
"write",
"the",
"file",
"."
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/docs/generate_modules.py#L85-L114 |
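The helpers `format_heading`, `format_directive`, `makename`, `write_file` and `shall_skip` are defined elsewhere in this script. A plausible minimal version of the two formatters, so the shape of the generated ReST is visible (an assumption; the real implementations may differ):

```python
def format_heading(level, text):
    """Underline text with the ReST character for the given heading level."""
    underlining = ['=', '-', '~'][level - 1] * len(text)
    return '%s\n%s\n\n' % (text, underlining)

def format_directive(module, package=None):
    """Emit an automodule directive for the (possibly packaged) module."""
    name = '%s.%s' % (package, module) if package else module
    directive = '.. automodule:: %s\n' % name
    for option in ('members', 'undoc-members', 'show-inheritance'):
        directive += '    :%s:\n' % option
    return directive

print(format_heading(1, 'soils Package') + format_directive('soils', 'sfsimodels'))
```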
sci-bots/svg-model | docs/generate_modules.py | create_modules_toc_file | def create_modules_toc_file(master_package, modules, opts, name='modules'):
"""
Create the module's index.
"""
text = format_heading(1, '%s Modules' % opts.header)
text += '.. toctree::\n'
text += ' :maxdepth: %s\n\n' % opts.maxdepth
modules.sort()
prev_module = ''
for module in modules:
# look if the module is a subpackage and, if yes, ignore it
if module.startswith(prev_module + '.'):
continue
prev_module = module
text += ' %s\n' % module
write_file(name, text, opts) | python | def create_modules_toc_file(master_package, modules, opts, name='modules'):
"""
Create the module's index.
"""
text = format_heading(1, '%s Modules' % opts.header)
text += '.. toctree::\n'
text += ' :maxdepth: %s\n\n' % opts.maxdepth
modules.sort()
prev_module = ''
for module in modules:
# look if the module is a subpackage and, if yes, ignore it
if module.startswith(prev_module + '.'):
continue
prev_module = module
text += ' %s\n' % module
write_file(name, text, opts) | [
"def",
"create_modules_toc_file",
"(",
"master_package",
",",
"modules",
",",
"opts",
",",
"name",
"=",
"'modules'",
")",
":",
"text",
"=",
"format_heading",
"(",
"1",
",",
"'%s Modules'",
"%",
"opts",
".",
"header",
")",
"text",
"+=",
"'.. toctree::\\n'",
"text",
"+=",
"' :maxdepth: %s\\n\\n'",
"%",
"opts",
".",
"maxdepth",
"modules",
".",
"sort",
"(",
")",
"prev_module",
"=",
"''",
"for",
"module",
"in",
"modules",
":",
"# look if the module is a subpackage and, if yes, ignore it",
"if",
"module",
".",
"startswith",
"(",
"prev_module",
"+",
"'.'",
")",
":",
"continue",
"prev_module",
"=",
"module",
"text",
"+=",
"' %s\\n'",
"%",
"module",
"write_file",
"(",
"name",
",",
"text",
",",
"opts",
")"
] | Create the module's index. | [
"Create",
"the",
"module",
"s",
"index",
"."
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/docs/generate_modules.py#L116-L133 |
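The `prev_module` comparison is what keeps subpackages out of the top-level TOC: any module whose dotted name extends the previous (sorted) entry is skipped. The filter in isolation (a sketch):

```python
modules = sorted(['pkg.c', 'pkg.a', 'pkg.a.b'])
prev_module = ''
kept = []
for module in modules:
    if module.startswith(prev_module + '.'):
        continue  # a subpackage of the previous entry, already covered
    prev_module = module
    kept.append(module)
print(kept)  # ['pkg.a', 'pkg.c']
```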
sci-bots/svg-model | docs/generate_modules.py | recurse_tree | def recurse_tree(path, excludes, opts):
"""
Look for every file in the directory tree and create the corresponding
ReST files.
"""
# use absolute path for root, as relative paths like '../../foo' cause
# 'if "/." in root ...' to filter out *all* modules otherwise
path = os.path.abspath(path)
# check if the base directory is a package and get its name
if INIT in os.listdir(path):
package_name = path.split(os.path.sep)[-1]
else:
package_name = None
toc = []
tree = os.walk(path, False)
for root, subs, files in tree:
# keep only the Python script files
py_files = sorted([f for f in files if os.path.splitext(f)[1] == '.py'])
if INIT in py_files:
py_files.remove(INIT)
py_files.insert(0, INIT)
# remove hidden ('.') and private ('_') directories
subs = sorted([sub for sub in subs if sub[0] not in ['.', '_']])
# check if there are valid files to process
# TODO: could add check for windows hidden files
if "/." in root or "/_" in root \
or not py_files \
or is_excluded(root, excludes):
continue
if INIT in py_files:
# we are in package ...
if (# ... with subpackage(s)
subs
or
# ... with some module(s)
len(py_files) > 1
or
# ... with a not-to-be-skipped INIT file
not shall_skip(os.path.join(root, INIT))
):
subroot = root[len(path):].lstrip(os.path.sep).replace(os.path.sep, '.')
create_package_file(root, package_name, subroot, py_files, opts, subs)
toc.append(makename(package_name, subroot))
elif root == path:
# if we are at the root level, we don't require it to be a package
for py_file in py_files:
if not shall_skip(os.path.join(path, py_file)):
module = os.path.splitext(py_file)[0]
create_module_file(package_name, module, opts)
toc.append(makename(package_name, module))
# create the module's index
if not opts.notoc:
create_modules_toc_file(package_name, toc, opts) | python | def recurse_tree(path, excludes, opts):
"""
Look for every file in the directory tree and create the corresponding
ReST files.
"""
# use absolute path for root, as relative paths like '../../foo' cause
# 'if "/." in root ...' to filter out *all* modules otherwise
path = os.path.abspath(path)
# check if the base directory is a package and get its name
if INIT in os.listdir(path):
package_name = path.split(os.path.sep)[-1]
else:
package_name = None
toc = []
tree = os.walk(path, False)
for root, subs, files in tree:
# keep only the Python script files
py_files = sorted([f for f in files if os.path.splitext(f)[1] == '.py'])
if INIT in py_files:
py_files.remove(INIT)
py_files.insert(0, INIT)
# remove hidden ('.') and private ('_') directories
subs = sorted([sub for sub in subs if sub[0] not in ['.', '_']])
# check if there are valid files to process
# TODO: could add check for windows hidden files
if "/." in root or "/_" in root \
or not py_files \
or is_excluded(root, excludes):
continue
if INIT in py_files:
# we are in package ...
if (# ... with subpackage(s)
subs
or
# ... with some module(s)
len(py_files) > 1
or
# ... with a not-to-be-skipped INIT file
not shall_skip(os.path.join(root, INIT))
):
subroot = root[len(path):].lstrip(os.path.sep).replace(os.path.sep, '.')
create_package_file(root, package_name, subroot, py_files, opts, subs)
toc.append(makename(package_name, subroot))
elif root == path:
# if we are at the root level, we don't require it to be a package
for py_file in py_files:
if not shall_skip(os.path.join(path, py_file)):
module = os.path.splitext(py_file)[0]
create_module_file(package_name, module, opts)
toc.append(makename(package_name, module))
# create the module's index
if not opts.notoc:
create_modules_toc_file(package_name, toc, opts) | [
"def",
"recurse_tree",
"(",
"path",
",",
"excludes",
",",
"opts",
")",
":",
"# use absolute path for root, as relative paths like '../../foo' cause",
"# 'if \"/.\" in root ...' to filter out *all* modules otherwise",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"# check if the base directory is a package and get is name",
"if",
"INIT",
"in",
"os",
".",
"listdir",
"(",
"path",
")",
":",
"package_name",
"=",
"path",
".",
"split",
"(",
"os",
".",
"path",
".",
"sep",
")",
"[",
"-",
"1",
"]",
"else",
":",
"package_name",
"=",
"None",
"toc",
"=",
"[",
"]",
"tree",
"=",
"os",
".",
"walk",
"(",
"path",
",",
"False",
")",
"for",
"root",
",",
"subs",
",",
"files",
"in",
"tree",
":",
"# keep only the Python script files",
"py_files",
"=",
"sorted",
"(",
"[",
"f",
"for",
"f",
"in",
"files",
"if",
"os",
".",
"path",
".",
"splitext",
"(",
"f",
")",
"[",
"1",
"]",
"==",
"'.py'",
"]",
")",
"if",
"INIT",
"in",
"py_files",
":",
"py_files",
".",
"remove",
"(",
"INIT",
")",
"py_files",
".",
"insert",
"(",
"0",
",",
"INIT",
")",
"# remove hidden ('.') and private ('_') directories",
"subs",
"=",
"sorted",
"(",
"[",
"sub",
"for",
"sub",
"in",
"subs",
"if",
"sub",
"[",
"0",
"]",
"not",
"in",
"[",
"'.'",
",",
"'_'",
"]",
"]",
")",
"# check if there are valid files to process",
"# TODO: could add check for windows hidden files",
"if",
"\"/.\"",
"in",
"root",
"or",
"\"/_\"",
"in",
"root",
"or",
"not",
"py_files",
"or",
"is_excluded",
"(",
"root",
",",
"excludes",
")",
":",
"continue",
"if",
"INIT",
"in",
"py_files",
":",
"# we are in package ...",
"if",
"(",
"# ... with subpackage(s)",
"subs",
"or",
"# ... with some module(s)",
"len",
"(",
"py_files",
")",
">",
"1",
"or",
"# ... with a not-to-be-skipped INIT file",
"not",
"shall_skip",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"INIT",
")",
")",
")",
":",
"subroot",
"=",
"root",
"[",
"len",
"(",
"path",
")",
":",
"]",
".",
"lstrip",
"(",
"os",
".",
"path",
".",
"sep",
")",
".",
"replace",
"(",
"os",
".",
"path",
".",
"sep",
",",
"'.'",
")",
"create_package_file",
"(",
"root",
",",
"package_name",
",",
"subroot",
",",
"py_files",
",",
"opts",
",",
"subs",
")",
"toc",
".",
"append",
"(",
"makename",
"(",
"package_name",
",",
"subroot",
")",
")",
"elif",
"root",
"==",
"path",
":",
"# if we are at the root level, we don't require it to be a package",
"for",
"py_file",
"in",
"py_files",
":",
"if",
"not",
"shall_skip",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"py_file",
")",
")",
":",
"module",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"py_file",
")",
"[",
"0",
"]",
"create_module_file",
"(",
"package_name",
",",
"module",
",",
"opts",
")",
"toc",
".",
"append",
"(",
"makename",
"(",
"package_name",
",",
"module",
")",
")",
"# create the module's index",
"if",
"not",
"opts",
".",
"notoc",
":",
"create_modules_toc_file",
"(",
"package_name",
",",
"toc",
",",
"opts",
")"
] | Look for every file in the directory tree and create the corresponding
ReST files. | [
"Look",
"for",
"every",
"file",
"in",
"the",
"directory",
"tree",
"and",
"create",
"the",
"corresponding",
"ReST",
"files",
"."
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/docs/generate_modules.py#L142-L196 |
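Two details are easy to miss: `os.walk(path, False)` walks bottom-up (`topdown=False`), and the `INIT` file (by all appearances `'__init__.py'`) is moved to the front of each file list so the package page is generated first. The reordering in isolation (a sketch; the value of `INIT` is an assumption):

```python
INIT = '__init__.py'  # assumed value of the module-level constant
files = ['zeta.py', '__init__.py', 'alpha.py', 'notes.txt']

py_files = sorted(f for f in files if f.endswith('.py'))
if INIT in py_files:
    py_files.remove(INIT)
    py_files.insert(0, INIT)  # document the package itself first
print(py_files)  # ['__init__.py', 'alpha.py', 'zeta.py']
```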
sci-bots/svg-model | docs/generate_modules.py | normalize_excludes | def normalize_excludes(rootpath, excludes):
"""
Normalize the excluded directory list:
* must be either an absolute path or start with rootpath,
* otherwise it is joined with rootpath
* with trailing slash
"""
sep = os.path.sep
f_excludes = []
for exclude in excludes:
if not os.path.isabs(exclude) and not exclude.startswith(rootpath):
exclude = os.path.join(rootpath, exclude)
if not exclude.endswith(sep):
exclude += sep
f_excludes.append(exclude)
return f_excludes | python | def normalize_excludes(rootpath, excludes):
"""
Normalize the excluded directory list:
* must be either an absolute path or start with rootpath,
* otherwise it is joined with rootpath
* with trailing slash
"""
sep = os.path.sep
f_excludes = []
for exclude in excludes:
if not os.path.isabs(exclude) and not exclude.startswith(rootpath):
exclude = os.path.join(rootpath, exclude)
if not exclude.endswith(sep):
exclude += sep
f_excludes.append(exclude)
return f_excludes | [
"def",
"normalize_excludes",
"(",
"rootpath",
",",
"excludes",
")",
":",
"sep",
"=",
"os",
".",
"path",
".",
"sep",
"f_excludes",
"=",
"[",
"]",
"for",
"exclude",
"in",
"excludes",
":",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"exclude",
")",
"and",
"not",
"exclude",
".",
"startswith",
"(",
"rootpath",
")",
":",
"exclude",
"=",
"os",
".",
"path",
".",
"join",
"(",
"rootpath",
",",
"exclude",
")",
"if",
"not",
"exclude",
".",
"endswith",
"(",
"sep",
")",
":",
"exclude",
"+=",
"sep",
"f_excludes",
".",
"append",
"(",
"exclude",
")",
"return",
"f_excludes"
] | Normalize the excluded directory list:
* must be either an absolute path or start with rootpath,
* otherwise it is joined with rootpath
* with trailing slash | [
"Normalize",
"the",
"excluded",
"directory",
"list",
":",
"*",
"must",
"be",
"either",
"an",
"absolute",
"path",
"or",
"start",
"with",
"rootpath",
"*",
"otherwise",
"it",
"is",
"joined",
"with",
"rootpath",
"*",
"with",
"trailing",
"slash"
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/docs/generate_modules.py#L198-L213 |
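Run against the function above, relative excludes are anchored under the root and every entry gains a trailing separator, which `is_excluded` relies on next:

```python
import os

root = os.path.abspath('src')
print(normalize_excludes(root, ['tests', os.path.join(root, 'build')]))
# e.g. ['/abs/src/tests/', '/abs/src/build/'] on POSIX: rooted, trailing sep
```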
sci-bots/svg-model | docs/generate_modules.py | is_excluded | def is_excluded(root, excludes):
"""
Check if the directory is in the exclude list.
Note: by having trailing slashes, we avoid common prefix issues, like
e.g. an exclude "foo" also accidentally excluding "foobar".
"""
sep = os.path.sep
if not root.endswith(sep):
root += sep
for exclude in excludes:
if root.startswith(exclude):
return True
return False | python | def is_excluded(root, excludes):
"""
Check if the directory is in the exclude list.
Note: by having trailing slashes, we avoid common prefix issues, like
e.g. an exclude "foo" also accidentally excluding "foobar".
"""
sep = os.path.sep
if not root.endswith(sep):
root += sep
for exclude in excludes:
if root.startswith(exclude):
return True
return False | [
"def",
"is_excluded",
"(",
"root",
",",
"excludes",
")",
":",
"sep",
"=",
"os",
".",
"path",
".",
"sep",
"if",
"not",
"root",
".",
"endswith",
"(",
"sep",
")",
":",
"root",
"+=",
"sep",
"for",
"exclude",
"in",
"excludes",
":",
"if",
"root",
".",
"startswith",
"(",
"exclude",
")",
":",
"return",
"True",
"return",
"False"
] | Check if the directory is in the exclude list.
Note: by having trailing slashes, we avoid common prefix issues, like
e.g. an exclude "foo" also accidentally excluding "foobar". | [
"Check",
"if",
"the",
"directory",
"is",
"in",
"the",
"exclude",
"list",
"."
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/docs/generate_modules.py#L215-L228 |
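The trailing-separator convention is exactly what prevents the prefix false positive mentioned in the docstring: excluding `foo` must not also exclude `foobar`. A demonstration (POSIX-style paths for readability):

```python
print(is_excluded('/pkg/foobar', ['/pkg/foo/']))  # False, correct
print('/pkg/foobar'.startswith('/pkg/foo'))       # True, the naive check's bug
```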
sci-bots/svg-model | docs/generate_modules.py | main | def main():
"""
Parse and check the command line arguments.
"""
parser = optparse.OptionParser(usage="""usage: %prog [options] <package path> [exclude paths, ...]
Note: By default this script will not overwrite already created files.""")
parser.add_option("-n", "--doc-header", action="store", dest="header", help="Documentation Header (default=Project)", default="Project")
parser.add_option("-d", "--dest-dir", action="store", dest="destdir", help="Output destination directory", default="")
parser.add_option("-s", "--suffix", action="store", dest="suffix", help="module suffix (default=txt)", default="txt")
parser.add_option("-m", "--maxdepth", action="store", dest="maxdepth", help="Maximum depth of submodules to show in the TOC (default=4)", type="int", default=4)
parser.add_option("-r", "--dry-run", action="store_true", dest="dryrun", help="Run the script without creating the files")
parser.add_option("-f", "--force", action="store_true", dest="force", help="Overwrite all the files")
parser.add_option("-t", "--no-toc", action="store_true", dest="notoc", help="Don't create the table of content file")
(opts, args) = parser.parse_args()
if not args:
parser.error("package path is required.")
else:
rootpath, excludes = args[0], args[1:]
if os.path.isdir(rootpath):
# check if the output destination is a valid directory
if opts.destdir and os.path.isdir(opts.destdir):
excludes = normalize_excludes(rootpath, excludes)
recurse_tree(rootpath, excludes, opts)
else:
print '%s is not a valid output destination directory.' % opts.destdir
else:
print '%s is not a valid directory.' % rootpath | python | def main():
"""
Parse and check the command line arguments.
"""
parser = optparse.OptionParser(usage="""usage: %prog [options] <package path> [exclude paths, ...]
Note: By default this script will not overwrite already created files.""")
parser.add_option("-n", "--doc-header", action="store", dest="header", help="Documentation Header (default=Project)", default="Project")
parser.add_option("-d", "--dest-dir", action="store", dest="destdir", help="Output destination directory", default="")
parser.add_option("-s", "--suffix", action="store", dest="suffix", help="module suffix (default=txt)", default="txt")
parser.add_option("-m", "--maxdepth", action="store", dest="maxdepth", help="Maximum depth of submodules to show in the TOC (default=4)", type="int", default=4)
parser.add_option("-r", "--dry-run", action="store_true", dest="dryrun", help="Run the script without creating the files")
parser.add_option("-f", "--force", action="store_true", dest="force", help="Overwrite all the files")
parser.add_option("-t", "--no-toc", action="store_true", dest="notoc", help="Don't create the table of content file")
(opts, args) = parser.parse_args()
if not args:
parser.error("package path is required.")
else:
rootpath, excludes = args[0], args[1:]
if os.path.isdir(rootpath):
# check if the output destination is a valid directory
if opts.destdir and os.path.isdir(opts.destdir):
excludes = normalize_excludes(rootpath, excludes)
recurse_tree(rootpath, excludes, opts)
else:
print '%s is not a valid output destination directory.' % opts.destdir
else:
print '%s is not a valid directory.' % rootpath | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"optparse",
".",
"OptionParser",
"(",
"usage",
"=",
"\"\"\"usage: %prog [options] <package path> [exclude paths, ...]\n\nNote: By default this script will not overwrite already created files.\"\"\"",
")",
"parser",
".",
"add_option",
"(",
"\"-n\"",
",",
"\"--doc-header\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"header\"",
",",
"help",
"=",
"\"Documentation Header (default=Project)\"",
",",
"default",
"=",
"\"Project\"",
")",
"parser",
".",
"add_option",
"(",
"\"-d\"",
",",
"\"--dest-dir\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"destdir\"",
",",
"help",
"=",
"\"Output destination directory\"",
",",
"default",
"=",
"\"\"",
")",
"parser",
".",
"add_option",
"(",
"\"-s\"",
",",
"\"--suffix\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"suffix\"",
",",
"help",
"=",
"\"module suffix (default=txt)\"",
",",
"default",
"=",
"\"txt\"",
")",
"parser",
".",
"add_option",
"(",
"\"-m\"",
",",
"\"--maxdepth\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"maxdepth\"",
",",
"help",
"=",
"\"Maximum depth of submodules to show in the TOC (default=4)\"",
",",
"type",
"=",
"\"int\"",
",",
"default",
"=",
"4",
")",
"parser",
".",
"add_option",
"(",
"\"-r\"",
",",
"\"--dry-run\"",
",",
"action",
"=",
"\"store_true\"",
",",
"dest",
"=",
"\"dryrun\"",
",",
"help",
"=",
"\"Run the script without creating the files\"",
")",
"parser",
".",
"add_option",
"(",
"\"-f\"",
",",
"\"--force\"",
",",
"action",
"=",
"\"store_true\"",
",",
"dest",
"=",
"\"force\"",
",",
"help",
"=",
"\"Overwrite all the files\"",
")",
"parser",
".",
"add_option",
"(",
"\"-t\"",
",",
"\"--no-toc\"",
",",
"action",
"=",
"\"store_true\"",
",",
"dest",
"=",
"\"notoc\"",
",",
"help",
"=",
"\"Don't create the table of content file\"",
")",
"(",
"opts",
",",
"args",
")",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"not",
"args",
":",
"parser",
".",
"error",
"(",
"\"package path is required.\"",
")",
"else",
":",
"rootpath",
",",
"excludes",
"=",
"args",
"[",
"0",
"]",
",",
"args",
"[",
"1",
":",
"]",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"rootpath",
")",
":",
"# check if the output destination is a valid directory",
"if",
"opts",
".",
"destdir",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"opts",
".",
"destdir",
")",
":",
"excludes",
"=",
"normalize_excludes",
"(",
"rootpath",
",",
"excludes",
")",
"recurse_tree",
"(",
"rootpath",
",",
"excludes",
",",
"opts",
")",
"else",
":",
"print",
"'%s is not a valid output destination directory.'",
"%",
"opts",
".",
"destdir",
"else",
":",
"print",
"'%s is not a valid directory.'",
"%",
"rootpath"
] | Parse and check the command line arguments. | [
"Parse",
"and",
"check",
"the",
"command",
"line",
"arguments",
"."
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/docs/generate_modules.py#L230-L257 |
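Put together, a typical invocation walks a source tree, skips an exclude path, and writes the ReST files into an existing output directory. A sketch of the command line (the script is Python 2, as the print statements show; paths and project name are illustrative):

```python
import subprocess

# Document everything under src/, excluding src/tests, into docs/api
# (docs/api must already exist; -s rst selects the file suffix).
subprocess.run(['python2', 'docs/generate_modules.py',
                '-n', 'My Project', '-d', 'docs/api', '-s', 'rst',
                'src', 'src/tests'], check=True)
```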
Metatab/metatab | metatab/doc.py | MetatabDoc.path | def path(self):
"""Return the path to the file, if the ref is a file"""
if not isinstance(self.ref, str):
return None
u = parse_app_url(self.ref)
if u.inner.proto != 'file':
return None
return u.path | python | def path(self):
"""Return the path to the file, if the ref is a file"""
if not isinstance(self.ref, str):
return None
u = parse_app_url(self.ref)
if u.inner.proto != 'file':
return None
return u.path | [
"def",
"path",
"(",
"self",
")",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"ref",
",",
"str",
")",
":",
"return",
"None",
"u",
"=",
"parse_app_url",
"(",
"self",
".",
"ref",
")",
"if",
"u",
".",
"inner",
".",
"proto",
"!=",
"'file'",
":",
"return",
"None",
"return",
"u",
".",
"path"
] | Return the path to the file, if the ref is a file | [
"Return",
"the",
"path",
"to",
"the",
"file",
"if",
"the",
"ref",
"is",
"a",
"file"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L98-L109 |
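`path` is deliberately conservative: it answers only for string refs whose resolved URL is a local file. A usage sketch (assuming `metatab` is installed and a parseable `metadata.csv` exists in the working directory):

```python
from metatab import MetatabDoc  # assumed public import

doc = MetatabDoc('metadata.csv')
print(doc.path)  # the local path, e.g. 'metadata.csv'

web_doc = MetatabDoc('http://example.com/metadata.csv')
print(web_doc.path)  # None, the ref is not a file URL
```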
Metatab/metatab | metatab/doc.py | MetatabDoc.doc_dir | def doc_dir(self):
"""The absolute directory of the document"""
from os.path import abspath
if not self.ref:
return None
u = parse_app_url(self.ref)
return abspath(dirname(u.path)) | python | def doc_dir(self):
"""The absolute directory of the document"""
from os.path import abspath
if not self.ref:
return None
u = parse_app_url(self.ref)
return abspath(dirname(u.path)) | [
"def",
"doc_dir",
"(",
"self",
")",
":",
"from",
"os",
".",
"path",
"import",
"abspath",
"if",
"not",
"self",
".",
"ref",
":",
"return",
"None",
"u",
"=",
"parse_app_url",
"(",
"self",
".",
"ref",
")",
"return",
"abspath",
"(",
"dirname",
"(",
"u",
".",
"path",
")",
")"
] | The absolute directory of the document | [
"The",
"absolute",
"directory",
"of",
"the",
"document"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L135-L143 |
Metatab/metatab | metatab/doc.py | MetatabDoc.remove_term | def remove_term(self, t):
"""Only removes top-level terms. Child terms can be removed at the parent. """
try:
self.terms.remove(t)
except ValueError:
pass
if t.section and t.parent_term_lc == 'root':
t.section = self.add_section(t.section)
t.section.remove_term(t, remove_from_doc=False)
if t.parent:
try:
t.parent.remove_child(t)
except ValueError:
pass | python | def remove_term(self, t):
"""Only removes top-level terms. Child terms can be removed at the parent. """
try:
self.terms.remove(t)
except ValueError:
pass
if t.section and t.parent_term_lc == 'root':
t.section = self.add_section(t.section)
t.section.remove_term(t, remove_from_doc=False)
if t.parent:
try:
t.parent.remove_child(t)
except ValueError:
pass | [
"def",
"remove_term",
"(",
"self",
",",
"t",
")",
":",
"try",
":",
"self",
".",
"terms",
".",
"remove",
"(",
"t",
")",
"except",
"ValueError",
":",
"pass",
"if",
"t",
".",
"section",
"and",
"t",
".",
"parent_term_lc",
"==",
"'root'",
":",
"t",
".",
"section",
"=",
"self",
".",
"add_section",
"(",
"t",
".",
"section",
")",
"t",
".",
"section",
".",
"remove_term",
"(",
"t",
",",
"remove_from_doc",
"=",
"False",
")",
"if",
"t",
".",
"parent",
":",
"try",
":",
"t",
".",
"parent",
".",
"remove_child",
"(",
"t",
")",
"except",
"ValueError",
":",
"pass"
] | Only removes top-level terms. Child terms can be removed at the parent. | [
"Only",
"removes",
"top",
"-",
"level",
"terms",
".",
"Child",
"terms",
"can",
"be",
"removed",
"at",
"the",
"parent",
"."
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L227-L244 |
Metatab/metatab | metatab/doc.py | MetatabDoc.new_section | def new_section(self, name, params=None):
"""Return a new section"""
self.sections[name.lower()] = SectionTerm(None, name, term_args=params, doc=self)
# Set the default arguments
s = self.sections[name.lower()]
if name.lower() in self.decl_sections:
s.args = self.decl_sections[name.lower()]['args']
return s | python | def new_section(self, name, params=None):
"""Return a new section"""
self.sections[name.lower()] = SectionTerm(None, name, term_args=params, doc=self)
# Set the default arguments
s = self.sections[name.lower()]
if name.lower() in self.decl_sections:
s.args = self.decl_sections[name.lower()]['args']
return s | [
"def",
"new_section",
"(",
"self",
",",
"name",
",",
"params",
"=",
"None",
")",
":",
"self",
".",
"sections",
"[",
"name",
".",
"lower",
"(",
")",
"]",
"=",
"SectionTerm",
"(",
"None",
",",
"name",
",",
"term_args",
"=",
"params",
",",
"doc",
"=",
"self",
")",
"# Set the default arguments",
"s",
"=",
"self",
".",
"sections",
"[",
"name",
".",
"lower",
"(",
")",
"]",
"if",
"name",
".",
"lower",
"(",
")",
"in",
"self",
".",
"decl_sections",
":",
"s",
".",
"args",
"=",
"self",
".",
"decl_sections",
"[",
"name",
".",
"lower",
"(",
")",
"]",
"[",
"'args'",
"]",
"return",
"s"
] | Return a new section | [
"Return",
"a",
"new",
"section"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L263-L273 |
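A sketch of new_section; the section name and argument list are illustrative:

refs = doc.new_section('References', params=['Name', 'Title'])
# if 'references' is declared in doc.decl_sections, its declared args replace the params above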
Metatab/metatab | metatab/doc.py | MetatabDoc.get_or_new_section | def get_or_new_section(self, name, params=None):
"""Create a new section or return an existing one of the same name"""
if name not in self.sections:
self.sections[name.lower()] = SectionTerm(None, name, term_args=params, doc=self)
return self.sections[name.lower()] | python | def get_or_new_section(self, name, params=None):
"""Create a new section or return an existing one of the same name"""
if name not in self.sections:
self.sections[name.lower()] = SectionTerm(None, name, term_args=params, doc=self)
return self.sections[name.lower()] | [
"def",
"get_or_new_section",
"(",
"self",
",",
"name",
",",
"params",
"=",
"None",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"sections",
":",
"self",
".",
"sections",
"[",
"name",
".",
"lower",
"(",
")",
"]",
"=",
"SectionTerm",
"(",
"None",
",",
"name",
",",
"term_args",
"=",
"params",
",",
"doc",
"=",
"self",
")",
"return",
"self",
".",
"sections",
"[",
"name",
".",
"lower",
"(",
")",
"]"
] | Create a new section or return an existing one of the same name | [
"Create",
"a",
"new",
"section",
"or",
"return",
"an",
"existing",
"one",
"of",
"the",
"same",
"name"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L275-L280 |
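A sketch of get_or_new_section; note in the code above that the membership test uses the raw name while the keys are stored lower-cased, so only an already lower-case name is truly idempotent:

s1 = doc.get_or_new_section('schema')
s2 = doc.get_or_new_section('schema')
assert s1 is s2  # 'Schema' would fail the `name not in self.sections` test and be re-created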
Metatab/metatab | metatab/doc.py | MetatabDoc.sort_sections | def sort_sections(self, order):
"""
Sort sections according to the section names in the order list. All remaining sections
are added to the end in their original order
:param order: Iterable of section names
:return:
"""
order_lc = [e.lower() for e in order]
sections = OrderedDict( (k,self.sections[k]) for k in order_lc if k in self.sections)
sections.update( (k,self.sections[k]) for k in self.sections.keys() if k not in order_lc)
assert len(self.sections) == len(sections)
self.sections = sections | python | def sort_sections(self, order):
"""
Sort sections according to the section names in the order list. All remaining sections
are added to the end in their original order
:param order: Iterable of section names
:return:
"""
order_lc = [e.lower() for e in order]
sections = OrderedDict( (k,self.sections[k]) for k in order_lc if k in self.sections)
sections.update( (k,self.sections[k]) for k in self.sections.keys() if k not in order_lc)
assert len(self.sections) == len(sections)
self.sections = sections | [
"def",
"sort_sections",
"(",
"self",
",",
"order",
")",
":",
"order_lc",
"=",
"[",
"e",
".",
"lower",
"(",
")",
"for",
"e",
"in",
"order",
"]",
"sections",
"=",
"OrderedDict",
"(",
"(",
"k",
",",
"self",
".",
"sections",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"order_lc",
"if",
"k",
"in",
"self",
".",
"sections",
")",
"sections",
".",
"update",
"(",
"(",
"k",
",",
"self",
".",
"sections",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"self",
".",
"sections",
".",
"keys",
"(",
")",
"if",
"k",
"not",
"in",
"order_lc",
")",
"assert",
"len",
"(",
"self",
".",
"sections",
")",
"==",
"len",
"(",
"sections",
")",
"self",
".",
"sections",
"=",
"sections"
] | Sort sections according to the section names in the order list. All remaining sections
are added to the end in their original order
:param order: Iterable of section names
:return: | [
"Sort",
"sections",
"according",
"to",
"the",
"section",
"names",
"in",
"the",
"order",
"list",
".",
"All",
"remaining",
"sections",
"are",
"added",
"to",
"the",
"end",
"in",
"their",
"original",
"order"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L291-L308 |
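A sketch of sort_sections; the listed names come first and every other section keeps its original order at the end (section names are illustrative):

doc.sort_sections(['Root', 'Documentation'])
print(list(doc.sections.keys()))  # e.g. ['root', 'documentation', 'schema']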
Metatab/metatab | metatab/doc.py | MetatabDoc.find | def find(self, term, value=False, section=None, _expand_derived=True, **kwargs):
"""Return a list of terms, possibly in a particular section. Use joined term notation, such as 'Root.Name' The kwargs arg is used to set term properties, all of which match returned terms, so ``name='foobar'`` will match terms that have a ``name`` property of ``foobar``
:param term: The type of term to find, in fully-qulified notation, or use '*' for wild cards in either the parent or the record parts, such as 'Root.*', '*.Table' or '*.*'
:param value: Select terms with a given value
:param section: The name of the section in which to restrict the search
:param kwargs: See additional properties on which to match terms.
"""
import itertools
if kwargs: # Look for terms with particular property values
terms = self.find(term, value, section)
found_terms = []
for t in terms:
if all(t.get_value(k) == v for k, v in kwargs.items()):
found_terms.append(t)
return found_terms
def in_section(term, section):
if section is None:
return True
if term.section is None:
return False
if isinstance(section, (list, tuple)):
return any(in_section(t, e) for e in section)
else:
return section.lower() == term.section.name.lower()
# Try to replace the term with the list of its derived terms; that is, replace the super-class with all
# of the derived classes, but only do this expansion once.
if _expand_derived:
try:
try:
# Term is a string
term = list(self.derived_terms[term.lower()]) + [term]
except AttributeError: # Term is hopefully a list
terms = []
for t in term:
terms.append(t)
for dt in self.derived_terms[t.lower()]:
terms.append(dt)
except KeyError as e:
pass
# Find any of a list of terms
if isinstance(term, (list, tuple)):
return list(itertools.chain(*[self.find(e, value=value, section=section, _expand_derived=False) for e in term]))
else:
term = term.lower()
found = []
if not '.' in term:
term = 'root.' + term
if term.startswith('root.'):
term_gen = self.terms # Just the root level terms
else:
term_gen = self.all_terms # All terms, root level and children.
for t in term_gen:
if t.join_lc == 'root.root':
continue
assert t.section or t.join_lc == 'root.root' or t.join_lc == 'root.section', t
if (t.term_is(term)
and in_section(t, section)
and (value is False or value == t.value)):
found.append(t)
return found | python | def find(self, term, value=False, section=None, _expand_derived=True, **kwargs):
"""Return a list of terms, possibly in a particular section. Use joined term notation, such as 'Root.Name' The kwargs arg is used to set term properties, all of which match returned terms, so ``name='foobar'`` will match terms that have a ``name`` property of ``foobar``
:param term: The type of term to find, in fully-qulified notation, or use '*' for wild cards in either the parent or the record parts, such as 'Root.*', '*.Table' or '*.*'
:param value: Select terms with a given value
:param section: The name of the section in which to restrict the search
:param kwargs: See additional properties on which to match terms.
"""
import itertools
if kwargs: # Look for terms with particular property values
terms = self.find(term, value, section)
found_terms = []
for t in terms:
if all(t.get_value(k) == v for k, v in kwargs.items()):
found_terms.append(t)
return found_terms
def in_section(term, section):
if section is None:
return True
if term.section is None:
return False
if isinstance(section, (list, tuple)):
return any(in_section(t, e) for e in section)
else:
return section.lower() == term.section.name.lower()
# Try to replace the term with the list of its derived terms; that is, replace the super-class with all
# of the derived classes, but only do this expansion once.
if _expand_derived:
try:
try:
# Term is a string
term = list(self.derived_terms[term.lower()]) + [term]
except AttributeError: # Term is hopefully a list
terms = []
for t in term:
terms.append(t)
for dt in self.derived_terms[t.lower()]:
terms.append(dt)
except KeyError as e:
pass
# Find any of a list of terms
if isinstance(term, (list, tuple)):
return list(itertools.chain(*[self.find(e, value=value, section=section, _expand_derived=False) for e in term]))
else:
term = term.lower()
found = []
if not '.' in term:
term = 'root.' + term
if term.startswith('root.'):
term_gen = self.terms # Just the root level terms
else:
term_gen = self.all_terms # All terms, root level and children.
for t in term_gen:
if t.join_lc == 'root.root':
continue
assert t.section or t.join_lc == 'root.root' or t.join_lc == 'root.section', t
if (t.term_is(term)
and in_section(t, section)
and (value is False or value == t.value)):
found.append(t)
return found | [
"def",
"find",
"(",
"self",
",",
"term",
",",
"value",
"=",
"False",
",",
"section",
"=",
"None",
",",
"_expand_derived",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"itertools",
"if",
"kwargs",
":",
"# Look for terms with particular property values",
"terms",
"=",
"self",
".",
"find",
"(",
"term",
",",
"value",
",",
"section",
")",
"found_terms",
"=",
"[",
"]",
"for",
"t",
"in",
"terms",
":",
"if",
"all",
"(",
"t",
".",
"get_value",
"(",
"k",
")",
"==",
"v",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
")",
":",
"found_terms",
".",
"append",
"(",
"t",
")",
"return",
"found_terms",
"def",
"in_section",
"(",
"term",
",",
"section",
")",
":",
"if",
"section",
"is",
"None",
":",
"return",
"True",
"if",
"term",
".",
"section",
"is",
"None",
":",
"return",
"False",
"if",
"isinstance",
"(",
"section",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"any",
"(",
"in_section",
"(",
"t",
",",
"e",
")",
"for",
"e",
"in",
"section",
")",
"else",
":",
"return",
"section",
".",
"lower",
"(",
")",
"==",
"term",
".",
"section",
".",
"name",
".",
"lower",
"(",
")",
"# Try to replace the term with the list of its derived terms; that is, replace the super-class with all",
"# of the derived classes, but only do this expansion once.",
"if",
"_expand_derived",
":",
"try",
":",
"try",
":",
"# Term is a string",
"term",
"=",
"list",
"(",
"self",
".",
"derived_terms",
"[",
"term",
".",
"lower",
"(",
")",
"]",
")",
"+",
"[",
"term",
"]",
"except",
"AttributeError",
":",
"# Term is hopefully a list",
"terms",
"=",
"[",
"]",
"for",
"t",
"in",
"term",
":",
"terms",
".",
"append",
"(",
"term",
")",
"for",
"dt",
"in",
"self",
".",
"derived_terms",
"[",
"t",
".",
"lower",
"(",
")",
"]",
":",
"terms",
".",
"append",
"(",
"dt",
")",
"except",
"KeyError",
"as",
"e",
":",
"pass",
"# Find any of a list of terms",
"if",
"isinstance",
"(",
"term",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"[",
"self",
".",
"find",
"(",
"e",
",",
"value",
"=",
"value",
",",
"section",
"=",
"section",
",",
"_expand_derived",
"=",
"False",
")",
"for",
"e",
"in",
"term",
"]",
")",
")",
"else",
":",
"term",
"=",
"term",
".",
"lower",
"(",
")",
"found",
"=",
"[",
"]",
"if",
"not",
"'.'",
"in",
"term",
":",
"term",
"=",
"'root.'",
"+",
"term",
"if",
"term",
".",
"startswith",
"(",
"'root.'",
")",
":",
"term_gen",
"=",
"self",
".",
"terms",
"# Just the root level terms",
"else",
":",
"term_gen",
"=",
"self",
".",
"all_terms",
"# All terms, root level and children.",
"for",
"t",
"in",
"term_gen",
":",
"if",
"t",
".",
"join_lc",
"==",
"'root.root'",
":",
"continue",
"assert",
"t",
".",
"section",
"or",
"t",
".",
"join_lc",
"==",
"'root.root'",
"or",
"t",
".",
"join_lc",
"==",
"'root.section'",
",",
"t",
"if",
"(",
"t",
".",
"term_is",
"(",
"term",
")",
"and",
"in_section",
"(",
"t",
",",
"section",
")",
"and",
"(",
"value",
"is",
"False",
"or",
"value",
"==",
"t",
".",
"value",
")",
")",
":",
"found",
".",
"append",
"(",
"t",
")",
"return",
"found"
] | Return a list of terms, possibly in a particular section. Use joined term notation, such as 'Root.Name'. The kwargs arg is used to set term properties, all of which match returned terms, so ``name='foobar'`` will match terms that have a ``name`` property of ``foobar``
:param term: The type of term to find, in fully-qualified notation, or use '*' for wild cards in either the parent or the record parts, such as 'Root.*', '*.Table' or '*.*'
:param value: Select terms with a given value
:param section: The name of the section in which to restrict the search
:param kwargs: See additional properties on which to match terms. | [
"Return",
"a",
"list",
"of",
"terms",
"possibly",
"in",
"a",
"particular",
"section",
".",
"Use",
"joined",
"term",
"notation",
"such",
"as",
"Root",
".",
"Name",
"The",
"kwargs",
"arg",
"is",
"used",
"to",
"set",
"term",
"properties",
"all",
"of",
"which",
"match",
"returned",
"terms",
"so",
"name",
"=",
"foobar",
"will",
"match",
"terms",
"that",
"have",
"a",
"name",
"property",
"of",
"foobar"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L341-L424 |
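A sketch of the three lookup styles find supports above; the term names and the name value are illustrative:

datafiles = doc.find('Root.Datafile')             # a bare 'datafile' would be promoted to 'root.datafile'
tables = doc.find('*.Table')                      # wildcards work in the parent or record part
named = doc.find('Root.Datafile', name='cities')  # kwargs filter on term property values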
Metatab/metatab | metatab/doc.py | MetatabDoc.get | def get(self, term, default=None):
"""Return the first term, returning the default if no term is found"""
v = self.find_first(term)
if not v:
return default
else:
return v | python | def get(self, term, default=None):
"""Return the first term, returning the default if no term is found"""
v = self.find_first(term)
if not v:
return default
else:
return v | [
"def",
"get",
"(",
"self",
",",
"term",
",",
"default",
"=",
"None",
")",
":",
"v",
"=",
"self",
".",
"find_first",
"(",
"term",
")",
"if",
"not",
"v",
":",
"return",
"default",
"else",
":",
"return",
"v"
] | Return the first term, returning the default if no term is found | [
"Return",
"the",
"first",
"term",
"returning",
"the",
"default",
"if",
"no",
"term",
"is",
"found"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L444-L451 |
Metatab/metatab | metatab/doc.py | MetatabDoc.get_value | def get_value(self, term, default=None, section=None):
"""Return the first value, returning the default if no term is found"""
term = self.find_first(term, value=False, section=section)
if term is None:
return default
else:
return term.value | python | def get_value(self, term, default=None, section=None):
"""Return the first value, returning the default if no term is found"""
term = self.find_first(term, value=False, section=section)
if term is None:
return default
else:
return term.value | [
"def",
"get_value",
"(",
"self",
",",
"term",
",",
"default",
"=",
"None",
",",
"section",
"=",
"None",
")",
":",
"term",
"=",
"self",
".",
"find_first",
"(",
"term",
",",
"value",
"=",
"False",
",",
"section",
"=",
"section",
")",
"if",
"term",
"is",
"None",
":",
"return",
"default",
"else",
":",
"return",
"term",
".",
"value"
] | Return the first value, returning the default if no term is found | [
"Return",
"the",
"first",
"value",
"returning",
"the",
"default",
"if",
"no",
"term",
"is",
"found"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L453-L460 |
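A sketch of get and get_value; both delegate to find_first and fall back to the default:

title_term = doc.get('Root.Title')                       # a Term object, or None
title = doc.get_value('Root.Title', default='untitled')  # just the string value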
Metatab/metatab | metatab/doc.py | MetatabDoc.load_terms | def load_terms(self, terms):
"""Create a builder from a sequence of terms, usually a TermInterpreter"""
#if self.root and len(self.root.children) > 0:
# raise MetatabError("Can't run after adding terms to document.")
for t in terms:
t.doc = self
if t.term_is('root.root'):
if not self.root:
self.root = t
self.add_section(t)
continue
if t.term_is('root.section'):
self.add_section(t)
elif t.parent_term_lc == 'root':
self.add_term(t)
else:
# These terms aren't added to the doc because they are attached to a
# parent term that is added to the doc.
assert t.parent is not None
try:
dd = terms.declare_dict
self.decl_terms.update(dd['terms'])
self.decl_sections.update(dd['sections'])
self.super_terms.update(terms.super_terms())
kf = lambda e: e[1] # Sort on the value
self.derived_terms ={ k:set( e[0] for e in g)
for k, g in groupby(sorted(self.super_terms.items(), key=kf), kf)}
except AttributeError as e:
pass
try:
self.errors = terms.errors_as_dict()
except AttributeError:
self.errors = {}
return self | python | def load_terms(self, terms):
"""Create a builder from a sequence of terms, usually a TermInterpreter"""
#if self.root and len(self.root.children) > 0:
# raise MetatabError("Can't run after adding terms to document.")
for t in terms:
t.doc = self
if t.term_is('root.root'):
if not self.root:
self.root = t
self.add_section(t)
continue
if t.term_is('root.section'):
self.add_section(t)
elif t.parent_term_lc == 'root':
self.add_term(t)
else:
# These terms aren't added to the doc because they are attached to a
# parent term that is added to the doc.
assert t.parent is not None
try:
dd = terms.declare_dict
self.decl_terms.update(dd['terms'])
self.decl_sections.update(dd['sections'])
self.super_terms.update(terms.super_terms())
kf = lambda e: e[1] # Sort on the value
self.derived_terms ={ k:set( e[0] for e in g)
for k, g in groupby(sorted(self.super_terms.items(), key=kf), kf)}
except AttributeError as e:
pass
try:
self.errors = terms.errors_as_dict()
except AttributeError:
self.errors = {}
return self | [
"def",
"load_terms",
"(",
"self",
",",
"terms",
")",
":",
"#if self.root and len(self.root.children) > 0:",
"# raise MetatabError(\"Can't run after adding terms to document.\")",
"for",
"t",
"in",
"terms",
":",
"t",
".",
"doc",
"=",
"self",
"if",
"t",
".",
"term_is",
"(",
"'root.root'",
")",
":",
"if",
"not",
"self",
".",
"root",
":",
"self",
".",
"root",
"=",
"t",
"self",
".",
"add_section",
"(",
"t",
")",
"continue",
"if",
"t",
".",
"term_is",
"(",
"'root.section'",
")",
":",
"self",
".",
"add_section",
"(",
"t",
")",
"elif",
"t",
".",
"parent_term_lc",
"==",
"'root'",
":",
"self",
".",
"add_term",
"(",
"t",
")",
"else",
":",
"# These terms aren't added to the doc because they are attached to a",
"# parent term that is added to the doc.",
"assert",
"t",
".",
"parent",
"is",
"not",
"None",
"try",
":",
"dd",
"=",
"terms",
".",
"declare_dict",
"self",
".",
"decl_terms",
".",
"update",
"(",
"dd",
"[",
"'terms'",
"]",
")",
"self",
".",
"decl_sections",
".",
"update",
"(",
"dd",
"[",
"'sections'",
"]",
")",
"self",
".",
"super_terms",
".",
"update",
"(",
"terms",
".",
"super_terms",
"(",
")",
")",
"kf",
"=",
"lambda",
"e",
":",
"e",
"[",
"1",
"]",
"# Sort on the value",
"self",
".",
"derived_terms",
"=",
"{",
"k",
":",
"set",
"(",
"e",
"[",
"0",
"]",
"for",
"e",
"in",
"g",
")",
"for",
"k",
",",
"g",
"in",
"groupby",
"(",
"sorted",
"(",
"self",
".",
"super_terms",
".",
"items",
"(",
")",
",",
"key",
"=",
"kf",
")",
",",
"kf",
")",
"}",
"except",
"AttributeError",
"as",
"e",
":",
"pass",
"try",
":",
"self",
".",
"errors",
"=",
"terms",
".",
"errors_as_dict",
"(",
")",
"except",
"AttributeError",
":",
"self",
".",
"errors",
"=",
"{",
"}",
"return",
"self"
] | Create a builder from a sequence of terms, usually a TermInterpreter | [
"Create",
"a",
"builder",
"from",
"a",
"sequence",
"of",
"terms",
"usually",
"a",
"TermInterpreter"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L466-L515 |
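A sketch of load_terms; the terms iterable would normally come from the package's term parser, which is an assumption here since it is not shown in this file. Root terms and sections are attached, child terms ride along with their parents, and the method returns self so it chains:

doc = MetatabDoc().load_terms(parsed_terms)  # parsed_terms: hypothetical generator of Term objects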
Metatab/metatab | metatab/doc.py | MetatabDoc.cleanse | def cleanse(self):
"""Clean up some terms, like ensuring that the name is a slug"""
from .util import slugify
self.ensure_identifier()
try:
self.update_name()
except MetatabError:
identifier = self['Root'].find_first('Root.Identifier')
name = self['Root'].find_first('Root.Name')
if name and name.value:
name.value = slugify(name.value)
elif name:
name.value = slugify(identifier.value)
else:
self['Root'].get_or_new_term('Root.Name').value = slugify(identifier.value) | python | def cleanse(self):
"""Clean up some terms, like ensuring that the name is a slug"""
from .util import slugify
self.ensure_identifier()
try:
self.update_name()
except MetatabError:
identifier = self['Root'].find_first('Root.Identifier')
name = self['Root'].find_first('Root.Name')
if name and name.value:
name.value = slugify(name.value)
elif name:
name.value = slugify(identifier.value)
else:
self['Root'].get_or_new_term('Root.Name').value = slugify(identifier.value) | [
"def",
"cleanse",
"(",
"self",
")",
":",
"from",
".",
"util",
"import",
"slugify",
"self",
".",
"ensure_identifier",
"(",
")",
"try",
":",
"self",
".",
"update_name",
"(",
")",
"except",
"MetatabError",
":",
"identifier",
"=",
"self",
"[",
"'Root'",
"]",
".",
"find_first",
"(",
"'Root.Identifier'",
")",
"name",
"=",
"self",
"[",
"'Root'",
"]",
".",
"find_first",
"(",
"'Root.Name'",
")",
"if",
"name",
"and",
"name",
".",
"value",
":",
"name",
".",
"value",
"=",
"slugify",
"(",
"name",
".",
"value",
")",
"elif",
"name",
":",
"name",
".",
"value",
"=",
"slugify",
"(",
"identifier",
".",
"value",
")",
"else",
":",
"self",
"[",
"'Root'",
"]",
".",
"get_or_new_term",
"(",
"'Root.Name'",
")",
".",
"value",
"=",
"slugify",
"(",
"identifier",
".",
"value",
")"
] | Clean up some terms, like ensuring that the name is a slug | [
"Clean",
"up",
"some",
"terms",
"like",
"ensuring",
"that",
"the",
"name",
"is",
"a",
"slug"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L524-L543 |
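A sketch of cleanse; after the call Root.Name is a slug, falling back to the identifier when no usable name exists:

doc.cleanse()
print(doc.get_value('Root.Name'))  # e.g. 'my-data-package'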
Metatab/metatab | metatab/doc.py | MetatabDoc.update_name | def update_name(self, force=False, create_term=False, report_unchanged=True):
"""Generate the Root.Name term from DatasetName, Version, Origin, TIme and Space"""
updates = []
self.ensure_identifier()
name_term = self.find_first('Root.Name')
if not name_term:
if create_term:
name_term = self['Root'].new_term('Root.Name','')
else:
updates.append("No Root.Name, can't update name")
return updates
orig_name = name_term.value
identifier = self.get_value('Root.Identifier')
datasetname = self.get_value('Root.Dataset')
if datasetname:
name = self._generate_identity_name()
if name != orig_name or force:
name_term.value = name
updates.append("Changed Name")
else:
if report_unchanged:
updates.append("Name did not change")
elif not orig_name:
if not identifier:
updates.append("Failed to find DatasetName term or Identity term. Giving up")
else:
updates.append("Setting the name to the identifier")
name_term.value = identifier
elif orig_name == identifier:
if report_unchanged:
updates.append("Name did not change")
else:
# There is no DatasetName, so we can't generate a name, and the Root.Name is not empty, so we should
# not set it to the identity.
updates.append("No Root.Dataset, so can't update the name")
return updates | python | def update_name(self, force=False, create_term=False, report_unchanged=True):
"""Generate the Root.Name term from DatasetName, Version, Origin, TIme and Space"""
updates = []
self.ensure_identifier()
name_term = self.find_first('Root.Name')
if not name_term:
if create_term:
name_term = self['Root'].new_term('Root.Name','')
else:
updates.append("No Root.Name, can't update name")
return updates
orig_name = name_term.value
identifier = self.get_value('Root.Identifier')
datasetname = self.get_value('Root.Dataset')
if datasetname:
name = self._generate_identity_name()
if name != orig_name or force:
name_term.value = name
updates.append("Changed Name")
else:
if report_unchanged:
updates.append("Name did not change")
elif not orig_name:
if not identifier:
updates.append("Failed to find DatasetName term or Identity term. Giving up")
else:
updates.append("Setting the name to the identifier")
name_term.value = identifier
elif orig_name == identifier:
if report_unchanged:
updates.append("Name did not change")
else:
# There is no DatasetName, so we can't generate a name, and the Root.Name is not empty, so we should
# not set it to the identity.
updates.append("No Root.Dataset, so can't update the name")
return updates | [
"def",
"update_name",
"(",
"self",
",",
"force",
"=",
"False",
",",
"create_term",
"=",
"False",
",",
"report_unchanged",
"=",
"True",
")",
":",
"updates",
"=",
"[",
"]",
"self",
".",
"ensure_identifier",
"(",
")",
"name_term",
"=",
"self",
".",
"find_first",
"(",
"'Root.Name'",
")",
"if",
"not",
"name_term",
":",
"if",
"create_term",
":",
"name_term",
"=",
"self",
"[",
"'Root'",
"]",
".",
"new_term",
"(",
"'Root.Name'",
",",
"''",
")",
"else",
":",
"updates",
".",
"append",
"(",
"\"No Root.Name, can't update name\"",
")",
"return",
"updates",
"orig_name",
"=",
"name_term",
".",
"value",
"identifier",
"=",
"self",
".",
"get_value",
"(",
"'Root.Identifier'",
")",
"datasetname",
"=",
"self",
".",
"get_value",
"(",
"'Root.Dataset'",
")",
"if",
"datasetname",
":",
"name",
"=",
"self",
".",
"_generate_identity_name",
"(",
")",
"if",
"name",
"!=",
"orig_name",
"or",
"force",
":",
"name_term",
".",
"value",
"=",
"name",
"updates",
".",
"append",
"(",
"\"Changed Name\"",
")",
"else",
":",
"if",
"report_unchanged",
":",
"updates",
".",
"append",
"(",
"\"Name did not change\"",
")",
"elif",
"not",
"orig_name",
":",
"if",
"not",
"identifier",
":",
"updates",
".",
"append",
"(",
"\"Failed to find DatasetName term or Identity term. Giving up\"",
")",
"else",
":",
"updates",
".",
"append",
"(",
"\"Setting the name to the identifier\"",
")",
"name_term",
".",
"value",
"=",
"identifier",
"elif",
"orig_name",
"==",
"identifier",
":",
"if",
"report_unchanged",
":",
"updates",
".",
"append",
"(",
"\"Name did not change\"",
")",
"else",
":",
"# There is no DatasetName, so we can't gneerate name, and the Root.Name is not empty, so we should",
"# not set it to the identity.",
"updates",
".",
"append",
"(",
"\"No Root.Dataset, so can't update the name\"",
")",
"return",
"updates"
] | Generate the Root.Name term from DatasetName, Version, Origin, Time and Space | [
"Generate",
"the",
"Root",
".",
"Name",
"term",
"from",
"DatasetName",
"Version",
"Origin",
"TIme",
"and",
"Space"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L638-L689 |
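A sketch of update_name; it reports outcomes as strings instead of raising, so callers typically just print them:

for msg in doc.update_name(force=True, create_term=True):
    print(msg)  # e.g. 'Changed Name' or "No Root.Dataset, so can't update the name"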
Metatab/metatab | metatab/doc.py | MetatabDoc.as_dict | def as_dict(self, replace_value_names=True):
"""Iterate, link terms and convert to a dict"""
# This function is a hack, due to confusion between the root of the document, which
# should contain all terms, and the root section, which has only terms that are not
# in another section. So, here we are taking the Root section, and adding all of the other
# terms to it, as if it were also the root of the document tree.
r = RootSectionTerm(doc=self)
for s in self: # Iterate over sections
for t in s: # Iterate over the terms in each section.
r.terms.append(t)
return r.as_dict(replace_value_names) | python | def as_dict(self, replace_value_names=True):
"""Iterate, link terms and convert to a dict"""
# This function is a hack, due to confusion between the root of the document, which
# should contain all terms, and the root section, which has only terms that are not
# in another section. So, here we are taking the Root section, and adding all of the other
# terms to it, as if it were also the root of the document tree.
r = RootSectionTerm(doc=self)
for s in self: # Iterate over sections
for t in s: # Iterate over the terms in each section.
r.terms.append(t)
return r.as_dict(replace_value_names) | [
"def",
"as_dict",
"(",
"self",
",",
"replace_value_names",
"=",
"True",
")",
":",
"# This function is a hack, due to confusion between the root of the document, which",
"# should contain all terms, and the root section, which has only terms that are not",
"# in another section. So, here we are taking the Root section, and adding all of the other",
"# terms to it, as if it were also the root of the document tree.",
"r",
"=",
"RootSectionTerm",
"(",
"doc",
"=",
"self",
")",
"for",
"s",
"in",
"self",
":",
"# Iterate over sections",
"for",
"t",
"in",
"s",
":",
"# Iterate over the terms in each section.",
"r",
".",
"terms",
".",
"append",
"(",
"t",
")",
"return",
"r",
".",
"as_dict",
"(",
"replace_value_names",
")"
] | Iterate, link terms and convert to a dict | [
"Iterate",
"link",
"terms",
"and",
"convert",
"to",
"a",
"dict"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L738-L752 |
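A sketch of as_dict; because every section's terms are folded under a synthetic root, the result serializes directly:

import json
print(json.dumps(doc.as_dict(), indent=2, default=str))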
Metatab/metatab | metatab/doc.py | MetatabDoc.rows | def rows(self):
"""Iterate over all of the rows"""
for s_name, s in self.sections.items():
# Yield the section header
if s.name != 'Root':
yield [''] # Unnecessary, but makes for nice formatting. Should actually be done just before write
yield ['Section', s.value] + s.property_names
# Yield all of the rows for terms in the section
for row in s.rows:
term, value = row
term = term.replace('root.', '').title()
try:
yield [term] + value
except:
yield [term] + [value] | python | def rows(self):
"""Iterate over all of the rows"""
for s_name, s in self.sections.items():
# Yield the section header
if s.name != 'Root':
yield [''] # Unnecessary, but makes for nice formatting. Should actually be done just before write
yield ['Section', s.value] + s.property_names
# Yield all of the rows for terms in the section
for row in s.rows:
term, value = row
term = term.replace('root.', '').title()
try:
yield [term] + value
except:
yield [term] + [value] | [
"def",
"rows",
"(",
"self",
")",
":",
"for",
"s_name",
",",
"s",
"in",
"self",
".",
"sections",
".",
"items",
"(",
")",
":",
"# Yield the section header",
"if",
"s",
".",
"name",
"!=",
"'Root'",
":",
"yield",
"[",
"''",
"]",
"# Unecessary, but makes for nice formatting. Should actually be done just before write",
"yield",
"[",
"'Section'",
",",
"s",
".",
"value",
"]",
"+",
"s",
".",
"property_names",
"# Yield all of the rows for terms in the section",
"for",
"row",
"in",
"s",
".",
"rows",
":",
"term",
",",
"value",
"=",
"row",
"term",
"=",
"term",
".",
"replace",
"(",
"'root.'",
",",
"''",
")",
".",
"title",
"(",
")",
"try",
":",
"yield",
"[",
"term",
"]",
"+",
"value",
"except",
":",
"yield",
"[",
"term",
"]",
"+",
"[",
"value",
"]"
] | Iterate over all of the rows | [
"Iterate",
"over",
"all",
"of",
"the",
"rows"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L755-L775 |
Metatab/metatab | metatab/doc.py | MetatabDoc.all_terms | def all_terms(self):
"""Iterate over all of the terms. The self.terms property has only root level terms. This iterator
iterates over all terms"""
for s_name, s in self.sections.items():
# Yield the section header
if s.name != 'Root':
yield s
# Yield all of the rows for terms in the section
for rterm in s:
yield rterm
for d in rterm.descendents:
yield d | python | def all_terms(self):
"""Iterate over all of the terms. The self.terms property has only root level terms. This iterator
iterates over all terms"""
for s_name, s in self.sections.items():
# Yield the section header
if s.name != 'Root':
yield s
# Yield all of the rows for terms in the section
for rterm in s:
yield rterm
for d in rterm.descendents:
yield d | [
"def",
"all_terms",
"(",
"self",
")",
":",
"for",
"s_name",
",",
"s",
"in",
"self",
".",
"sections",
".",
"items",
"(",
")",
":",
"# Yield the section header",
"if",
"s",
".",
"name",
"!=",
"'Root'",
":",
"yield",
"s",
"# Yield all of the rows for terms in the section",
"for",
"rterm",
"in",
"s",
":",
"yield",
"rterm",
"for",
"d",
"in",
"rterm",
".",
"descendents",
":",
"yield",
"d"
] | Iterate over all of the terms. The self.terms property has only root level terms. This iterator
iterates over all terms | [
"Iterate",
"over",
"all",
"of",
"the",
"terms",
".",
"The",
"self",
".",
"terms",
"property",
"has",
"only",
"root",
"level",
"terms",
".",
"This",
"iterator",
"iterates",
"over",
"all",
"terms"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L785-L799 |
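A sketch contrasting doc.terms with all_terms; find above reads all_terms without parentheses, so it is assumed to be a @property yielding every term, while doc.terms holds only root-level ones:

top_level = list(doc.terms)
everything = list(doc.all_terms)
assert len(everything) >= len(top_level)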
Metatab/metatab | metatab/doc.py | MetatabDoc.as_csv | def as_csv(self):
"""Return a CSV representation as a string"""
from io import StringIO
s = StringIO()
w = csv.writer(s)
for row in self.rows:
w.writerow(row)
return s.getvalue() | python | def as_csv(self):
"""Return a CSV representation as a string"""
from io import StringIO
s = StringIO()
w = csv.writer(s)
for row in self.rows:
w.writerow(row)
return s.getvalue() | [
"def",
"as_csv",
"(",
"self",
")",
":",
"from",
"io",
"import",
"StringIO",
"s",
"=",
"StringIO",
"(",
")",
"w",
"=",
"csv",
".",
"writer",
"(",
"s",
")",
"for",
"row",
"in",
"self",
".",
"rows",
":",
"w",
".",
"writerow",
"(",
"row",
")",
"return",
"s",
".",
"getvalue",
"(",
")"
] | Return a CSV representation as a string | [
"Return",
"a",
"CSV",
"representation",
"as",
"a",
"string"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L801-L811 |
Metatab/metatab | metatab/doc.py | MetatabDoc.as_lines | def as_lines(self):
"""Return a Lines representation as a string"""
out_lines = []
for t,v in self.lines:
# Make the output prettier
if t == 'Section':
out_lines.append('')
out_lines.append('{}: {}'.format(t,v if v is not None else '') )
return '\n'.join(out_lines) | python | def as_lines(self):
"""Return a Lines representation as a string"""
out_lines = []
for t,v in self.lines:
# Make the output prettier
if t == 'Section':
out_lines.append('')
out_lines.append('{}: {}'.format(t,v if v is not None else '') )
return '\n'.join(out_lines) | [
"def",
"as_lines",
"(",
"self",
")",
":",
"out_lines",
"=",
"[",
"]",
"for",
"t",
",",
"v",
"in",
"self",
".",
"lines",
":",
"# Make the output prettier",
"if",
"t",
"==",
"'Section'",
":",
"out_lines",
".",
"append",
"(",
"''",
")",
"out_lines",
".",
"append",
"(",
"'{}: {}'",
".",
"format",
"(",
"t",
",",
"v",
"if",
"v",
"is",
"not",
"None",
"else",
"''",
")",
")",
"return",
"'\\n'",
".",
"join",
"(",
"out_lines",
")"
] | Return a Lines representation as a string | [
"Return",
"a",
"Lines",
"representation",
"as",
"a",
"string"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L813-L825 |
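A sketch of the two serializers above; the output path is hypothetical:

with open('metadata.csv', 'w') as f:
    f.write(doc.as_csv())  # Section header rows plus one row per term
print(doc.as_lines())      # 'Term: value' lines, with a blank line before each Section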
KelSolaar/Manager | manager/QWidget_component.py | QWidgetComponentFactory | def QWidgetComponentFactory(ui_file=None, *args, **kwargs):
"""
Defines a class factory creating :class:`QWidgetComponent` classes using given ui file.
:param ui_file: Ui file.
:type ui_file: unicode
:param \*args: Arguments.
:type \*args: \*
:param \*\*kwargs: Keyword arguments.
:type \*\*kwargs: \*\*
:return: QWidgetComponent class.
:rtype: QWidgetComponent
"""
class QWidgetComponent(foundations.ui.common.QWidget_factory(ui_file=ui_file)):
"""
Defines the base class for **Manager** package QWidget Components.
"""
component_activated = pyqtSignal()
"""
This signal is emitted by the :class:`QWidgetComponent` class when the Component is activated.
"""
component_deactivated = pyqtSignal()
"""
This signal is emitted by the :class:`QWidgetComponent` class when the Component is deactivated.
"""
component_initialized_ui = pyqtSignal()
"""
This signal is emitted by the :class:`QWidgetComponent` class when the Component ui is initialized.
"""
component_uninitialized_ui = pyqtSignal()
"""
This signal is emitted by the :class:`QWidgetComponent` class when the Component ui is uninitialized.
"""
def __init__(self, parent=None, name=None, *args, **kwargs):
"""
Initializes the class.
:param parent: Object parent.
:type parent: QObject
:param name: Component name.
:type name: unicode
:param \*args: Arguments.
:type \*args: \*
:param \*\*kwargs: Keyword arguments.
:type \*\*kwargs: \*\*
"""
LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))
super(QWidgetComponent, self).__init__(parent, *args, **kwargs)
# --- Setting class attributes. ---
self.__name = None
self.name = name
self.__activated = False
self.__initialized_ui = False
self.__deactivatable = True
@property
def name(self):
"""
Property for **self.__name** attribute.
:return: self.__name.
:rtype: unicode
"""
return self.__name
@name.setter
@foundations.exceptions.handle_exceptions(AssertionError)
def name(self, value):
"""
Setter for **self.__name** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"name", value)
self.__name = value
@name.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def name(self):
"""
Deleter for **self.__name** attribute.
"""
raise foundations.exceptions.ProgrammingError("{0} | '{1}' attribute is not deletable!".format(
self.__class__.__name__, "name"))
@property
def activated(self):
"""
Property for **self.__activated** attribute.
:return: self.__activated.
:rtype: bool
"""
return self.__activated
@activated.setter
@foundations.exceptions.handle_exceptions(AssertionError)
def activated(self, value):
"""
Setter for **self.__activated** attribute.
:param value: Attribute value.
:type value: bool
"""
if value is not None:
assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("activated", value)
self.component_activated.emit() if value else self.component_deactivated.emit()
self.__activated = value
@activated.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def activated(self):
"""
Deleter for **self.__activated** attribute.
"""
raise foundations.exceptions.ProgrammingError("{0} | '{1}' attribute is not deletable!".format(
self.__class__.__name__, "activated"))
@property
def initialized_ui(self):
"""
Property for **self.__initialized_ui** attribute.
:return: self.__initialized_ui.
:rtype: bool
"""
return self.__initialized_ui
@initialized_ui.setter
@foundations.exceptions.handle_exceptions(AssertionError)
def initialized_ui(self, value):
"""
Setter for **self.__initialized_ui** attribute.
:param value: Attribute value.
:type value: bool
"""
if value is not None:
assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format(
"initialized_ui", value)
self.component_initialized_ui.emit() if value else self.component_uninitialized_ui.emit()
self.__initialized_ui = value
@initialized_ui.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def initialized_ui(self):
"""
Deleter for **self.__initialized_ui** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "initialized_ui"))
@property
def deactivatable(self):
"""
Property for **self.__deactivatable** attribute.
:return: self.__deactivatable.
:rtype: bool
"""
return self.__deactivatable
@deactivatable.setter
@foundations.exceptions.handle_exceptions(AssertionError)
def deactivatable(self, value):
"""
Setter for **self.__deactivatable** attribute.
:param value: Attribute value.
:type value: bool
"""
if value is not None:
assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("deactivatable", value)
self.__deactivatable = value
@deactivatable.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def deactivatable(self):
"""
Deleter for **self.__deactivatable** attribute.
"""
raise foundations.exceptions.ProgrammingError("{0} | '{1}' attribute is not deletable!".format(
self.__class__.__name__, "deactivatable"))
@foundations.exceptions.handle_exceptions(NotImplementedError)
def activate(self):
"""
Sets Component activation state.
:return: Method success.
:rtype: bool
"""
raise NotImplementedError("{0} | '{1}' must be implemented by '{2}' subclasses!".format(
self.__class__.__name__, self.activate.__name__, self.__class__.__name__))
@foundations.exceptions.handle_exceptions(NotImplementedError)
def deactivate(self):
"""
Unsets Component activation state.
:return: Method success.
:rtype: bool
"""
raise NotImplementedError("{0} | '{1}' must be implemented by '{2}' subclasses!".format(
self.__class__.__name__, self.deactivate.__name__, self.__class__.__name__))
@foundations.exceptions.handle_exceptions(NotImplementedError)
def initialize_ui(self):
"""
Initializes the Component ui.
"""
raise NotImplementedError("{0} | '{1}' must be implemented by '{2}' subclasses!".format(
self.__class__.__name__, self.initialize_ui.__name__, self.__class__.__name__))
@foundations.exceptions.handle_exceptions(NotImplementedError)
def add_widget(self):
"""
Adds the Component Widget ui.
"""
raise NotImplementedError("{0} | '{1}' must be implemented by '{2}' subclasses!".format(
self.__class__.__name__, self.add_widget.__name__, self.__class__.__name__))
@foundations.exceptions.handle_exceptions(NotImplementedError)
def remove_widget(self):
"""
Removes the Component Widget ui.
"""
raise NotImplementedError("{0} | '{1}' must be implemented by '{2}' subclasses!".format(
self.__class__.__name__, self.remove_widget.__name__, self.__class__.__name__))
@foundations.exceptions.handle_exceptions(NotImplementedError)
def uninitialize_ui(self):
"""
Uninitializes the Component ui.
"""
raise NotImplementedError("{0} | '{1}' must be implemented by '{2}' subclasses!".format(
self.__class__.__name__, self.uninitialize_ui.__name__, self.__class__.__name__))
return QWidgetComponent | python | def QWidgetComponentFactory(ui_file=None, *args, **kwargs):
"""
Defines a class factory creating :class:`QWidgetComponent` classes using given ui file.
:param ui_file: Ui file.
:type ui_file: unicode
:param \*args: Arguments.
:type \*args: \*
:param \*\*kwargs: Keyword arguments.
:type \*\*kwargs: \*\*
:return: QWidgetComponent class.
:rtype: QWidgetComponent
"""
class QWidgetComponent(foundations.ui.common.QWidget_factory(ui_file=ui_file)):
"""
Defines the base class for **Manager** package QWidget Components.
"""
component_activated = pyqtSignal()
"""
This signal is emitted by the :class:`QWidgetComponent` class when the Component is activated.
"""
component_deactivated = pyqtSignal()
"""
This signal is emitted by the :class:`QWidgetComponent` class when the Component is deactivated.
"""
component_initialized_ui = pyqtSignal()
"""
This signal is emitted by the :class:`QWidgetComponent` class when the Component ui is initialized.
"""
component_uninitialized_ui = pyqtSignal()
"""
This signal is emitted by the :class:`QWidgetComponent` class when the Component ui is uninitialized.
"""
def __init__(self, parent=None, name=None, *args, **kwargs):
"""
Initializes the class.
:param parent: Object parent.
:type parent: QObject
:param name: Component name.
:type name: unicode
:param \*args: Arguments.
:type \*args: \*
:param \*\*kwargs: Keyword arguments.
:type \*\*kwargs: \*\*
"""
LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))
super(QWidgetComponent, self).__init__(parent, *args, **kwargs)
# --- Setting class attributes. ---
self.__name = None
self.name = name
self.__activated = False
self.__initialized_ui = False
self.__deactivatable = True
@property
def name(self):
"""
Property for **self.__name** attribute.
:return: self.__name.
:rtype: unicode
"""
return self.__name
@name.setter
@foundations.exceptions.handle_exceptions(AssertionError)
def name(self, value):
"""
Setter for **self.__name** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"name", value)
self.__name = value
@name.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def name(self):
"""
Deleter for **self.__name** attribute.
"""
raise foundations.exceptions.ProgrammingError("{0} | '{1}' attribute is not deletable!".format(
self.__class__.__name__, "name"))
@property
def activated(self):
"""
Property for **self.__activated** attribute.
:return: self.__activated.
:rtype: bool
"""
return self.__activated
@activated.setter
@foundations.exceptions.handle_exceptions(AssertionError)
def activated(self, value):
"""
Setter for **self.__activated** attribute.
:param value: Attribute value.
:type value: bool
"""
if value is not None:
assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("activated", value)
self.component_activated.emit() if value else self.component_deactivated.emit()
self.__activated = value
@activated.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def activated(self):
"""
Deleter for **self.__activated** attribute.
"""
raise foundations.exceptions.ProgrammingError("{0} | '{1}' attribute is not deletable!".format(
self.__class__.__name__, "activated"))
@property
def initialized_ui(self):
"""
Property for **self.__initialized_ui** attribute.
:return: self.__initialized_ui.
:rtype: bool
"""
return self.__initialized_ui
@initialized_ui.setter
@foundations.exceptions.handle_exceptions(AssertionError)
def initialized_ui(self, value):
"""
Setter for **self.__initialized_ui** attribute.
:param value: Attribute value.
:type value: bool
"""
if value is not None:
assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format(
"initialized_ui", value)
self.component_initialized_ui.emit() if value else self.component_uninitialized_ui.emit()
self.__initialized_ui = value
@initialized_ui.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def initialized_ui(self):
"""
Deleter for **self.__initialized_ui** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "initialized_ui"))
@property
def deactivatable(self):
"""
Property for **self.__deactivatable** attribute.
:return: self.__deactivatable.
:rtype: bool
"""
return self.__deactivatable
@deactivatable.setter
@foundations.exceptions.handle_exceptions(AssertionError)
def deactivatable(self, value):
"""
Setter for **self.__deactivatable** attribute.
:param value: Attribute value.
:type value: bool
"""
if value is not None:
assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("deactivatable", value)
self.__deactivatable = value
@deactivatable.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def deactivatable(self):
"""
Deleter for **self.__deactivatable** attribute.
"""
raise foundations.exceptions.ProgrammingError("{0} | '{1}' attribute is not deletable!".format(
self.__class__.__name__, "deactivatable"))
@foundations.exceptions.handle_exceptions(NotImplementedError)
def activate(self):
"""
Sets Component activation state.
:return: Method success.
:rtype: bool
"""
raise NotImplementedError("{0} | '{1}' must be implemented by '{2}' subclasses!".format(
self.__class__.__name__, self.activate.__name__, self.__class__.__name__))
@foundations.exceptions.handle_exceptions(NotImplementedError)
def deactivate(self):
"""
Unsets Component activation state.
:return: Method success.
:rtype: bool
"""
raise NotImplementedError("{0} | '{1}' must be implemented by '{2}' subclasses!".format(
self.__class__.__name__, self.deactivate.__name__, self.__class__.__name__))
@foundations.exceptions.handle_exceptions(NotImplementedError)
def initialize_ui(self):
"""
Initializes the Component ui.
"""
raise NotImplementedError("{0} | '{1}' must be implemented by '{2}' subclasses!".format(
self.__class__.__name__, self.initialize_ui.__name__, self.__class__.__name__))
@foundations.exceptions.handle_exceptions(NotImplementedError)
def add_widget(self):
"""
Adds the Component Widget ui.
"""
raise NotImplementedError("{0} | '{1}' must be implemented by '{2}' subclasses!".format(
self.__class__.__name__, self.add_widget.__name__, self.__class__.__name__))
@foundations.exceptions.handle_exceptions(NotImplementedError)
def remove_widget(self):
"""
Removes the Component Widget ui.
"""
raise NotImplementedError("{0} | '{1}' must be implemented by '{2}' subclasses!".format(
self.__class__.__name__, self.remove_widget.__name__, self.__class__.__name__))
@foundations.exceptions.handle_exceptions(NotImplementedError)
def uninitialize_ui(self):
"""
Uninitializes the Component ui.
"""
raise NotImplementedError("{0} | '{1}' must be implemented by '{2}' subclasses!".format(
self.__class__.__name__, self.uninitialize_ui.__name__, self.__class__.__name__))
return QWidgetComponent | [
"def",
"QWidgetComponentFactory",
"(",
"ui_file",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"class",
"QWidgetComponent",
"(",
"foundations",
".",
"ui",
".",
"common",
".",
"QWidget_factory",
"(",
"ui_file",
"=",
"ui_file",
")",
")",
":",
"\"\"\"\n Defines the base class for **Manager** package QWidget Components.\n \"\"\"",
"component_activated",
"=",
"pyqtSignal",
"(",
")",
"\"\"\"\n This signal is emited by the :class:`QObjectComponent` class when the Component is activated.\n \"\"\"",
"component_deactivated",
"=",
"pyqtSignal",
"(",
")",
"\"\"\"\n This signal is emited by the :class:`QObjectComponent` class when the Component is deactivated.\n \"\"\"",
"component_initialized_ui",
"=",
"pyqtSignal",
"(",
")",
"\"\"\"\n This signal is emited by the :class:`QObjectComponent` class when the Component ui is initialized.\n \"\"\"",
"component_uninitialized_ui",
"=",
"pyqtSignal",
"(",
")",
"\"\"\"\n This signal is emited by the :class:`QObjectComponent` class when the Component ui is uninitialized.\n \"\"\"",
"def",
"__init__",
"(",
"self",
",",
"parent",
"=",
"None",
",",
"name",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"\n Initializes the class.\n\n :param parent: Object parent.\n :type parent: QObject\n :param name: Component name.\n :type name: unicode\n :param \\*args: Arguments.\n :type \\*args: \\*\n :param \\*\\*kwargs: Keywords arguments.\n :type \\*\\*kwargs: \\*\\*\n \"\"\"",
"LOGGER",
".",
"debug",
"(",
"\"> Initializing '{0}()' class.\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"super",
"(",
"QWidgetComponent",
",",
"self",
")",
".",
"__init__",
"(",
"parent",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# --- Setting class attributes. ---",
"self",
".",
"__name",
"=",
"None",
"self",
".",
"name",
"=",
"name",
"self",
".",
"__activated",
"=",
"False",
"self",
".",
"__initialized_ui",
"=",
"False",
"self",
".",
"__deactivatable",
"=",
"True",
"@",
"property",
"def",
"name",
"(",
"self",
")",
":",
"\"\"\"\n Property for **self.__name** attribute.\n\n :return: self.__name.\n :rtype: unicode\n \"\"\"",
"return",
"self",
".",
"__name",
"@",
"name",
".",
"setter",
"@",
"foundations",
".",
"exceptions",
".",
"handle_exceptions",
"(",
"AssertionError",
")",
"def",
"name",
"(",
"self",
",",
"value",
")",
":",
"\"\"\"\n Setter for **self.__name** attribute.\n\n :param value: Attribute value.\n :type value: unicode\n \"\"\"",
"if",
"value",
"is",
"not",
"None",
":",
"assert",
"type",
"(",
"value",
")",
"is",
"unicode",
",",
"\"'{0}' attribute: '{1}' type is not 'unicode'!\"",
".",
"format",
"(",
"\"name\"",
",",
"value",
")",
"self",
".",
"__name",
"=",
"value",
"@",
"name",
".",
"deleter",
"@",
"foundations",
".",
"exceptions",
".",
"handle_exceptions",
"(",
"foundations",
".",
"exceptions",
".",
"ProgrammingError",
")",
"def",
"name",
"(",
"self",
")",
":",
"\"\"\"\n Deleter for **self.__name** attribute.\n \"\"\"",
"raise",
"foundations",
".",
"exceptions",
".",
"ProgrammingError",
"(",
"\"{0} | '{1}' attribute is not deletable!\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"\"name\"",
")",
")",
"@",
"property",
"def",
"activated",
"(",
"self",
")",
":",
"\"\"\"\n Property for **self.__activated** attribute.\n\n :return: self.__activated.\n :rtype: unicode\n \"\"\"",
"return",
"self",
".",
"__activated",
"@",
"activated",
".",
"setter",
"@",
"foundations",
".",
"exceptions",
".",
"handle_exceptions",
"(",
"AssertionError",
")",
"def",
"activated",
"(",
"self",
",",
"value",
")",
":",
"\"\"\"\n Setter for **self.__activated** attribute.\n\n :param value: Attribute value.\n :type value: unicode\n \"\"\"",
"if",
"value",
"is",
"not",
"None",
":",
"assert",
"type",
"(",
"value",
")",
"is",
"bool",
",",
"\"'{0}' attribute: '{1}' type is not 'bool'!\"",
".",
"format",
"(",
"\"activated\"",
",",
"value",
")",
"self",
".",
"component_activated",
".",
"emit",
"(",
")",
"if",
"value",
"else",
"self",
".",
"component_deactivated",
".",
"emit",
"(",
")",
"self",
".",
"__activated",
"=",
"value",
"@",
"activated",
".",
"deleter",
"@",
"foundations",
".",
"exceptions",
".",
"handle_exceptions",
"(",
"foundations",
".",
"exceptions",
".",
"ProgrammingError",
")",
"def",
"activated",
"(",
"self",
")",
":",
"\"\"\"\n Deleter for **self.__activated** attribute.\n \"\"\"",
"raise",
"foundations",
".",
"exceptions",
".",
"ProgrammingError",
"(",
"\"{0} | '{1}' attribute is not deletable!\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"\"activated\"",
")",
")",
"@",
"property",
"def",
"initialized_ui",
"(",
"self",
")",
":",
"\"\"\"\n Property for **self.__initialized_ui** attribute.\n\n :return: self.__initialized_ui.\n :rtype: bool\n \"\"\"",
"return",
"self",
".",
"__initialized_ui",
"@",
"initialized_ui",
".",
"setter",
"@",
"foundations",
".",
"exceptions",
".",
"handle_exceptions",
"(",
"AssertionError",
")",
"def",
"initialized_ui",
"(",
"self",
",",
"value",
")",
":",
"\"\"\"\n Setter for **self.__initialized_ui** attribute.\n\n :param value: Attribute value.\n :type value: bool\n \"\"\"",
"if",
"value",
"is",
"not",
"None",
":",
"assert",
"type",
"(",
"value",
")",
"is",
"bool",
",",
"\"'{0}' attribute: '{1}' type is not 'bool'!\"",
".",
"format",
"(",
"\"initialized_ui\"",
",",
"value",
")",
"self",
".",
"component_initialized_ui",
".",
"emit",
"(",
")",
"if",
"value",
"else",
"self",
".",
"component_uninitialized_ui",
".",
"emit",
"(",
")",
"self",
".",
"__initialized_ui",
"=",
"value",
"@",
"initialized_ui",
".",
"deleter",
"@",
"foundations",
".",
"exceptions",
".",
"handle_exceptions",
"(",
"foundations",
".",
"exceptions",
".",
"ProgrammingError",
")",
"def",
"initialized_ui",
"(",
"self",
")",
":",
"\"\"\"\n Deleter for **self.__initialized_ui** attribute.\n \"\"\"",
"raise",
"foundations",
".",
"exceptions",
".",
"ProgrammingError",
"(",
"\"{0} | '{1}' attribute is not deletable!\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"\"initialized_ui\"",
")",
")",
"@",
"property",
"def",
"deactivatable",
"(",
"self",
")",
":",
"\"\"\"\n Property for **self.__deactivatable** attribute.\n\n :return: self.__deactivatable.\n :rtype: unicode\n \"\"\"",
"return",
"self",
".",
"__deactivatable",
"@",
"deactivatable",
".",
"setter",
"@",
"foundations",
".",
"exceptions",
".",
"handle_exceptions",
"(",
"AssertionError",
")",
"def",
"deactivatable",
"(",
"self",
",",
"value",
")",
":",
"\"\"\"\n Setter for **self.__deactivatable** attribute.\n\n :param value: Attribute value.\n :type value: unicode\n \"\"\"",
"if",
"value",
"is",
"not",
"None",
":",
"assert",
"type",
"(",
"value",
")",
"is",
"bool",
",",
"\"'{0}' attribute: '{1}' type is not 'bool'!\"",
".",
"format",
"(",
"\"deactivatable\"",
",",
"value",
")",
"self",
".",
"__deactivatable",
"=",
"value",
"@",
"deactivatable",
".",
"deleter",
"@",
"foundations",
".",
"exceptions",
".",
"handle_exceptions",
"(",
"foundations",
".",
"exceptions",
".",
"ProgrammingError",
")",
"def",
"deactivatable",
"(",
"self",
")",
":",
"\"\"\"\n Deleter for **self.__deactivatable** attribute.\n \"\"\"",
"raise",
"foundations",
".",
"exceptions",
".",
"ProgrammingError",
"(",
"\"{0} | '{1}' attribute is not deletable!\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"\"deactivatable\"",
")",
")",
"@",
"foundations",
".",
"exceptions",
".",
"handle_exceptions",
"(",
"NotImplementedError",
")",
"def",
"activate",
"(",
"self",
")",
":",
"\"\"\"\n Sets Component activation state.\n\n :return: Method success.\n :rtype: bool\n \"\"\"",
"raise",
"NotImplementedError",
"(",
"\"{0} | '{1}' must be implemented by '{2}' subclasses!\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"self",
".",
"activate",
".",
"__name__",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"@",
"foundations",
".",
"exceptions",
".",
"handle_exceptions",
"(",
"NotImplementedError",
")",
"def",
"deactivate",
"(",
"self",
")",
":",
"\"\"\"\n Unsets Component activation state.\n\n :return: Method success.\n :rtype: bool\n \"\"\"",
"raise",
"NotImplementedError",
"(",
"\"{0} | '{1}' must be implemented by '{2}' subclasses!\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"self",
".",
"deactivate",
".",
"__name__",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"@",
"foundations",
".",
"exceptions",
".",
"handle_exceptions",
"(",
"NotImplementedError",
")",
"def",
"initialize_ui",
"(",
"self",
")",
":",
"\"\"\"\n Initializes the Component ui.\n \"\"\"",
"raise",
"NotImplementedError",
"(",
"\"{0} | '{1}' must be implemented by '{2}' subclasses!\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"self",
".",
"deactivate",
".",
"__name__",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"@",
"foundations",
".",
"exceptions",
".",
"handle_exceptions",
"(",
"NotImplementedError",
")",
"def",
"add_widget",
"(",
"self",
")",
":",
"\"\"\"\n Adds the Component Widget ui.\n \"\"\"",
"raise",
"NotImplementedError",
"(",
"\"{0} | '{1}' must be implemented by '{2}' subclasses!\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"self",
".",
"deactivate",
".",
"__name__",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"@",
"foundations",
".",
"exceptions",
".",
"handle_exceptions",
"(",
"NotImplementedError",
")",
"def",
"remove_widget",
"(",
"self",
")",
":",
"\"\"\"\n Removes the Component Widget ui.\n \"\"\"",
"raise",
"NotImplementedError",
"(",
"\"{0} | '{1}' must be implemented by '{2}' subclasses!\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"self",
".",
"deactivate",
".",
"__name__",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"@",
"foundations",
".",
"exceptions",
".",
"handle_exceptions",
"(",
"NotImplementedError",
")",
"def",
"uninitialize_ui",
"(",
"self",
")",
":",
"\"\"\"\n Uninitializes the Component ui.\n \"\"\"",
"raise",
"NotImplementedError",
"(",
"\"{0} | '{1}' must be implemented by '{2}' subclasses!\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"self",
".",
"deactivate",
".",
"__name__",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"return",
"QWidgetComponent"
] | Defines a class factory creating :class:`QWidgetComponent` classes using given ui file.
:param ui_file: Ui file.
:type ui_file: unicode
:param \*args: Arguments.
:type \*args: \*
:param \*\*kwargs: Keywords arguments.
:type \*\*kwargs: \*\*
:return: QWidgetComponent class.
:rtype: QWidgetComponent | [
"Defines",
"a",
"class",
"factory",
"creating",
":",
"class",
":",
"QWidgetComponent",
"classes",
"using",
"given",
"ui",
"file",
"."
] | train | https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/QWidget_component.py#L37-L307 |
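The tokens above repeat one pattern throughout this factory: a setter that asserts the value's type, paired with a deleter that refuses deletion. A plain-Python sketch of that guarded-property pattern follows; the foundations.exceptions decorators are omitted and treated as simple exception wrappers, which is an assumption, and a stdlib AttributeError stands in for ProgrammingError:

# Plain-Python sketch of the guarded-property pattern tokenized above.
class Component(object):
    def __init__(self):
        self.__deactivatable = True

    @property
    def deactivatable(self):
        return self.__deactivatable

    @deactivatable.setter
    def deactivatable(self, value):
        if value is not None:
            assert type(value) is bool, \
                "'{0}' attribute: '{1}' type is not 'bool'!".format("deactivatable", value)
        self.__deactivatable = value

    @deactivatable.deleter
    def deactivatable(self):
        # Mirrors the ProgrammingError raised above, using a stdlib exception.
        raise AttributeError("{0} | 'deactivatable' attribute is not deletable!".format(
            self.__class__.__name__))

component = Component()
component.deactivatable = False   # passes the bool assertion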
inspirehep/inspire-query-parser | inspire_query_parser/visitors/restructuring_visitor.py | _restructure_if_volume_follows_journal | def _restructure_if_volume_follows_journal(left, right):
"""Remove volume node if it follows a journal logically in the tree hierarchy.
Args:
left (ast.ASTElement): The journal KeywordOp node.
right (ast.ASTElement): The rest of the tree to be restructured.
Returns:
(ast.ASTElement): The restructured tree, with the volume node removed.
Notes:
This happens to support queries like "journal Phys.Rev. and vol d85". Appends the value of KeywordOp with
Keyword 'volume' and discards 'volume' KeywordOp node from the tree.
"""
def _get_volume_keyword_op_and_remaining_subtree(right_subtree):
if isinstance(right_subtree, NotOp) and isinstance(right_subtree.op, KeywordOp) \
and right_subtree.op.left == Keyword('volume'):
return None, None
elif isinstance(right_subtree, AndOp) and isinstance(right_subtree.left, NotOp) \
and isinstance(right_subtree.left.op, KeywordOp) and right_subtree.left.op.left == Keyword('volume'):
return None, right_subtree.right
elif isinstance(right_subtree, KeywordOp) and right_subtree.left == Keyword('volume'):
return right_subtree, None
elif isinstance(right_subtree, AndOp) and right_subtree.left.left == Keyword('volume'):
return right_subtree.left, right_subtree.right
journal_value = left.right.value
volume_and_remaining_subtree = _get_volume_keyword_op_and_remaining_subtree(right)
if not volume_and_remaining_subtree:
return
volume_node, remaining_subtree = volume_and_remaining_subtree
if volume_node:
left.right.value = ','.join([journal_value, volume_node.right.value])
return AndOp(left, remaining_subtree) if remaining_subtree else left | python | def _restructure_if_volume_follows_journal(left, right):
"""Remove volume node if it follows a journal logically in the tree hierarchy.
Args:
left (ast.ASTElement): The journal KeywordOp node.
right (ast.ASTElement): The rest of the tree to be restructured.
Returns:
(ast.ASTElement): The restructured tree, with the volume node removed.
Notes:
This happens to support queries like "journal Phys.Rev. and vol d85". Appends the value of KeywordOp with
Keyword 'volume' and discards 'volume' KeywordOp node from the tree.
"""
def _get_volume_keyword_op_and_remaining_subtree(right_subtree):
if isinstance(right_subtree, NotOp) and isinstance(right_subtree.op, KeywordOp) \
and right_subtree.op.left == Keyword('volume'):
return None, None
elif isinstance(right_subtree, AndOp) and isinstance(right_subtree.left, NotOp) \
and isinstance(right_subtree.left.op, KeywordOp) and right_subtree.left.op.left == Keyword('volume'):
return None, right_subtree.right
elif isinstance(right_subtree, KeywordOp) and right_subtree.left == Keyword('volume'):
return right_subtree, None
elif isinstance(right_subtree, AndOp) and right_subtree.left.left == Keyword('volume'):
return right_subtree.left, right_subtree.right
journal_value = left.right.value
volume_and_remaining_subtree = _get_volume_keyword_op_and_remaining_subtree(right)
if not volume_and_remaining_subtree:
return
volume_node, remaining_subtree = volume_and_remaining_subtree
if volume_node:
left.right.value = ','.join([journal_value, volume_node.right.value])
return AndOp(left, remaining_subtree) if remaining_subtree else left | [
"def",
"_restructure_if_volume_follows_journal",
"(",
"left",
",",
"right",
")",
":",
"def",
"_get_volume_keyword_op_and_remaining_subtree",
"(",
"right_subtree",
")",
":",
"if",
"isinstance",
"(",
"right_subtree",
",",
"NotOp",
")",
"and",
"isinstance",
"(",
"right_subtree",
".",
"op",
",",
"KeywordOp",
")",
"and",
"right_subtree",
".",
"op",
".",
"left",
"==",
"Keyword",
"(",
"'volume'",
")",
":",
"return",
"None",
",",
"None",
"elif",
"isinstance",
"(",
"right_subtree",
",",
"AndOp",
")",
"and",
"isinstance",
"(",
"right_subtree",
".",
"left",
",",
"NotOp",
")",
"and",
"isinstance",
"(",
"right_subtree",
".",
"left",
".",
"op",
",",
"KeywordOp",
")",
"and",
"right_subtree",
".",
"left",
".",
"op",
".",
"left",
"==",
"Keyword",
"(",
"'volume'",
")",
":",
"return",
"None",
",",
"right_subtree",
".",
"right",
"elif",
"isinstance",
"(",
"right_subtree",
",",
"KeywordOp",
")",
"and",
"right_subtree",
".",
"left",
"==",
"Keyword",
"(",
"'volume'",
")",
":",
"return",
"right_subtree",
",",
"None",
"elif",
"isinstance",
"(",
"right_subtree",
",",
"AndOp",
")",
"and",
"right_subtree",
".",
"left",
".",
"left",
"==",
"Keyword",
"(",
"'volume'",
")",
":",
"return",
"right_subtree",
".",
"left",
",",
"right_subtree",
".",
"right",
"journal_value",
"=",
"left",
".",
"right",
".",
"value",
"volume_and_remaining_subtree",
"=",
"_get_volume_keyword_op_and_remaining_subtree",
"(",
"right",
")",
"if",
"not",
"volume_and_remaining_subtree",
":",
"return",
"volume_node",
",",
"remaining_subtree",
"=",
"volume_and_remaining_subtree",
"if",
"volume_node",
":",
"left",
".",
"right",
".",
"value",
"=",
"','",
".",
"join",
"(",
"[",
"journal_value",
",",
"volume_node",
".",
"right",
".",
"value",
"]",
")",
"return",
"AndOp",
"(",
"left",
",",
"remaining_subtree",
")",
"if",
"remaining_subtree",
"else",
"left"
] | Remove volume node if it follows a journal logically in the tree hierarchy.
Args:
left (ast.ASTElement): The journal KeywordOp node.
right (ast.ASTElement): The rest of the tree to be restructured.
Returns:
(ast.ASTElement): The restructured tree, with the volume node removed.
Notes:
This happens to support queries like "journal Phys.Rev. and vol d85". Appends the value of KeywordOp with
Keyword 'volume' and discards 'volume' KeywordOp node from the tree. | [
"Remove",
"volume",
"node",
"if",
"it",
"follows",
"a",
"journal",
"logically",
"in",
"the",
"tree",
"hierarchy",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/restructuring_visitor.py#L48-L87 |
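A hedged usage sketch for the function above. It assumes the Keyword, Value and KeywordOp constructors from inspire_query_parser.ast build nodes the way the function's attribute accesses (left.right.value, op.left) imply:

from inspire_query_parser.ast import Keyword, KeywordOp, Value

# AST fragments for the docstring's example query "journal Phys.Rev. and vol d85"
journal = KeywordOp(Keyword('journal'), Value('Phys.Rev.'))
volume = KeywordOp(Keyword('volume'), Value('d85'))

result = _restructure_if_volume_follows_journal(journal, volume)
# Expected: a single KeywordOp whose value is 'Phys.Rev.,d85', i.e. the volume
# node has been folded into the journal value as described in the Notes.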
inspirehep/inspire-query-parser | inspire_query_parser/visitors/restructuring_visitor.py | _convert_simple_value_boolean_query_to_and_boolean_queries | def _convert_simple_value_boolean_query_to_and_boolean_queries(tree, keyword):
"""Chain SimpleValueBooleanQuery values into chained AndOp queries with the given current Keyword."""
def _create_operator_node(value_node):
"""Creates a KeywordOp or a ValueOp node."""
base_node = value_node.op if isinstance(value_node, NotOp) else value_node
updated_base_node = KeywordOp(keyword, base_node) if keyword else ValueOp(base_node)
return NotOp(updated_base_node) if isinstance(value_node, NotOp) else updated_base_node
def _get_bool_op_type(bool_op):
return AndOp if isinstance(bool_op, And) else OrOp
new_tree_root = _get_bool_op_type(tree.bool_op)(None, None)
current_tree = new_tree_root
previous_tree = tree
while True: # Walk down the tree while building the new AndOp queries subtree.
current_tree.left = _create_operator_node(previous_tree.left)
if not isinstance(previous_tree.right, SimpleValueBooleanQuery):
current_tree.right = _create_operator_node(previous_tree.right)
break
previous_tree = previous_tree.right
current_tree.right = _get_bool_op_type(previous_tree.bool_op)(None, None)
current_tree = current_tree.right
return new_tree_root | python | def _convert_simple_value_boolean_query_to_and_boolean_queries(tree, keyword):
"""Chain SimpleValueBooleanQuery values into chained AndOp queries with the given current Keyword."""
def _create_operator_node(value_node):
"""Creates a KeywordOp or a ValueOp node."""
base_node = value_node.op if isinstance(value_node, NotOp) else value_node
updated_base_node = KeywordOp(keyword, base_node) if keyword else ValueOp(base_node)
return NotOp(updated_base_node) if isinstance(value_node, NotOp) else updated_base_node
def _get_bool_op_type(bool_op):
return AndOp if isinstance(bool_op, And) else OrOp
new_tree_root = _get_bool_op_type(tree.bool_op)(None, None)
current_tree = new_tree_root
previous_tree = tree
while True: # Walk down the tree while building the new AndOp queries subtree.
current_tree.left = _create_operator_node(previous_tree.left)
if not isinstance(previous_tree.right, SimpleValueBooleanQuery):
current_tree.right = _create_operator_node(previous_tree.right)
break
previous_tree = previous_tree.right
current_tree.right = _get_bool_op_type(previous_tree.bool_op)(None, None)
current_tree = current_tree.right
return new_tree_root | [
"def",
"_convert_simple_value_boolean_query_to_and_boolean_queries",
"(",
"tree",
",",
"keyword",
")",
":",
"def",
"_create_operator_node",
"(",
"value_node",
")",
":",
"\"\"\"Creates a KeywordOp or a ValueOp node.\"\"\"",
"base_node",
"=",
"value_node",
".",
"op",
"if",
"isinstance",
"(",
"value_node",
",",
"NotOp",
")",
"else",
"value_node",
"updated_base_node",
"=",
"KeywordOp",
"(",
"keyword",
",",
"base_node",
")",
"if",
"keyword",
"else",
"ValueOp",
"(",
"base_node",
")",
"return",
"NotOp",
"(",
"updated_base_node",
")",
"if",
"isinstance",
"(",
"value_node",
",",
"NotOp",
")",
"else",
"updated_base_node",
"def",
"_get_bool_op_type",
"(",
"bool_op",
")",
":",
"return",
"AndOp",
"if",
"isinstance",
"(",
"bool_op",
",",
"And",
")",
"else",
"OrOp",
"new_tree_root",
"=",
"_get_bool_op_type",
"(",
"tree",
".",
"bool_op",
")",
"(",
"None",
",",
"None",
")",
"current_tree",
"=",
"new_tree_root",
"previous_tree",
"=",
"tree",
"while",
"True",
":",
"# Walk down the tree while building the new AndOp queries subtree.",
"current_tree",
".",
"left",
"=",
"_create_operator_node",
"(",
"previous_tree",
".",
"left",
")",
"if",
"not",
"isinstance",
"(",
"previous_tree",
".",
"right",
",",
"SimpleValueBooleanQuery",
")",
":",
"current_tree",
".",
"right",
"=",
"_create_operator_node",
"(",
"previous_tree",
".",
"right",
")",
"break",
"previous_tree",
"=",
"previous_tree",
".",
"right",
"current_tree",
".",
"right",
"=",
"_get_bool_op_type",
"(",
"previous_tree",
".",
"bool_op",
")",
"(",
"None",
",",
"None",
")",
"current_tree",
"=",
"current_tree",
".",
"right",
"return",
"new_tree_root"
] | Chain SimpleValueBooleanQuery values into chained AndOp queries with the given current Keyword. | [
"Chain",
"SimpleValueBooleanQuery",
"values",
"into",
"chained",
"AndOp",
"queries",
"with",
"the",
"given",
"current",
"Keyword",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/restructuring_visitor.py#L90-L118 |
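The distribution this function performs can be shown with a small self-contained sketch; the tuple-based nodes below are hypothetical stand-ins for the package's AST classes:

# Self-contained sketch: distribute one keyword over several boolean values.
def distribute_keyword(keyword, values, bool_op='and'):
    pairs = [('keyword', keyword, value) for value in values]
    tree = pairs[-1]
    for pair in reversed(pairs[:-1]):
        tree = (bool_op, pair, tree)   # chain right-associatively, as above
    return tree

# "author ellis and smith" effectively becomes author:ellis AND author:smith
print(distribute_keyword('author', ['ellis', 'smith']))
# ('and', ('keyword', 'author', 'ellis'), ('keyword', 'author', 'smith'))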
inspirehep/inspire-query-parser | inspire_query_parser/visitors/restructuring_visitor.py | RestructuringVisitor.visit_boolean_query | def visit_boolean_query(self, node):
"""Convert BooleanRule into AndOp or OrOp nodes."""
left = node.left.accept(self)
right = node.right.accept(self)
is_journal_keyword_op = isinstance(left, KeywordOp) and left.left == Keyword('journal')
if is_journal_keyword_op:
journal_and_volume_conjunction = _restructure_if_volume_follows_journal(left, right)
if journal_and_volume_conjunction:
return journal_and_volume_conjunction
return AndOp(left, right) if isinstance(node.bool_op, And) else OrOp(left, right) | python | def visit_boolean_query(self, node):
"""Convert BooleanRule into AndOp or OrOp nodes."""
left = node.left.accept(self)
right = node.right.accept(self)
is_journal_keyword_op = isinstance(left, KeywordOp) and left.left == Keyword('journal')
if is_journal_keyword_op:
journal_and_volume_conjunction = _restructure_if_volume_follows_journal(left, right)
if journal_and_volume_conjunction:
return journal_and_volume_conjunction
return AndOp(left, right) if isinstance(node.bool_op, And) else OrOp(left, right) | [
"def",
"visit_boolean_query",
"(",
"self",
",",
"node",
")",
":",
"left",
"=",
"node",
".",
"left",
".",
"accept",
"(",
"self",
")",
"right",
"=",
"node",
".",
"right",
".",
"accept",
"(",
"self",
")",
"is_journal_keyword_op",
"=",
"isinstance",
"(",
"left",
",",
"KeywordOp",
")",
"and",
"left",
".",
"left",
"==",
"Keyword",
"(",
"'journal'",
")",
"if",
"is_journal_keyword_op",
":",
"journal_and_volume_conjunction",
"=",
"_restructure_if_volume_follows_journal",
"(",
"left",
",",
"right",
")",
"if",
"journal_and_volume_conjunction",
":",
"return",
"journal_and_volume_conjunction",
"return",
"AndOp",
"(",
"left",
",",
"right",
")",
"if",
"isinstance",
"(",
"node",
".",
"bool_op",
",",
"And",
")",
"else",
"OrOp",
"(",
"left",
",",
"right",
")"
] | Convert BooleanRule into AndOp or OrOp nodes. | [
"Convert",
"BooleanRule",
"into",
"AndOp",
"or",
"OrOp",
"nodes",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/restructuring_visitor.py#L160-L173 |
inspirehep/inspire-query-parser | inspire_query_parser/visitors/restructuring_visitor.py | RestructuringVisitor.visit_simple_value_boolean_query | def visit_simple_value_boolean_query(self, node):
"""
Visits only the children of :class:`SimpleValueBooleanQuery` without substituting the actual node type.
Notes:
Defer conversion from :class:`SimpleValueBooleanQuery` to AndOp or OrOp.
This transformation needs to occur higher in the tree, so that we don't lose the information that this is a
boolean query among terminals; thus, the associative rule needs to be applied if we reached here from a
keyword query, or a plain conversion from :class:`SimpleValueBooleanQuery` to :class:`AndOp` or :class:`OrOp`
is done otherwise.
"""
node.left, node.right = node.left.accept(self), node.right.accept(self)
return node | python | def visit_simple_value_boolean_query(self, node):
"""
Visits only the children of :class:`SimpleValueBooleanQuery` without substituting the actual node type.
Notes:
Defer conversion from :class:`SimpleValueBooleanQuery` to AndOp or OrOp.
This transformation needs to occur higher in the tree, so that we don't lose the information that this is a
boolean query among terminals; thus, the associative rule needs to be applied if we reached here from a
keyword query, or a plain conversion from :class:`SimpleValueBooleanQuery` to :class:`AndOp` or :class:`OrOp`
is done otherwise.
"""
node.left, node.right = node.left.accept(self), node.right.accept(self)
return node | [
"def",
"visit_simple_value_boolean_query",
"(",
"self",
",",
"node",
")",
":",
"node",
".",
"left",
",",
"node",
".",
"right",
"=",
"node",
".",
"left",
".",
"accept",
"(",
"self",
")",
",",
"node",
".",
"right",
".",
"accept",
"(",
"self",
")",
"return",
"node"
] | Visits only the children of :class:`SimpleValueBooleanQuery` without substituting the actual node type.
Notes:
Defer conversion from :class:`SimpleValueBooleanQuery` to AndOp or OrOp.
This transformation needs to occur higher in the tree, so that we don't lose the information that this is a
boolean query among terminals; thus, the associative rule needs to be applied if we reached here from a
keyword query, or a plain conversion from :class:`SimpleValueBooleanQuery` to :class:`AndOp` or :class:`OrOp`
is done otherwise. | [
"Visits",
"only",
"the",
"children",
"of",
":",
"class",
":",
"SimpleValueBooleanQuery",
"without",
"substituting",
"the",
"actual",
"node",
"type",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/restructuring_visitor.py#L175-L187 |
inspirehep/inspire-query-parser | inspire_query_parser/visitors/restructuring_visitor.py | RestructuringVisitor.visit_spires_keyword_query | def visit_spires_keyword_query(self, node):
"""Transform a :class:`SpiresKeywordQuery` into a :class:`KeywordOp`.
Notes:
In case the value is a :class:`SimpleValueBooleanQuery`, the subtree is transformed to chained
:class:`AndOp` queries containing :class:`KeywordOp`, whose keyword is the keyword of the current node and
whose values are all the :class:`SimpleValueBooleanQuery` values (either :class:`SimpleValues` or
:class:`SimpleValueNegation`).
"""
keyword = node.left.accept(self)
value = node.right.accept(self)
if isinstance(value, SimpleValueBooleanQuery):
return _convert_simple_value_boolean_query_to_and_boolean_queries(value, keyword)
return KeywordOp(keyword, value) | python | def visit_spires_keyword_query(self, node):
"""Transform a :class:`SpiresKeywordQuery` into a :class:`KeywordOp`.
Notes:
In case the value is a :class:`SimpleValueBooleanQuery`, the subtree is transformed to chained
:class:`AndOp` queries containing :class:`KeywordOp`, whose keyword is the keyword of the current node and
whose values are all the :class:`SimpleValueBooleanQuery` values (either :class:`SimpleValues` or
:class:`SimpleValueNegation`).
"""
keyword = node.left.accept(self)
value = node.right.accept(self)
if isinstance(value, SimpleValueBooleanQuery):
return _convert_simple_value_boolean_query_to_and_boolean_queries(value, keyword)
return KeywordOp(keyword, value) | [
"def",
"visit_spires_keyword_query",
"(",
"self",
",",
"node",
")",
":",
"keyword",
"=",
"node",
".",
"left",
".",
"accept",
"(",
"self",
")",
"value",
"=",
"node",
".",
"right",
".",
"accept",
"(",
"self",
")",
"if",
"isinstance",
"(",
"value",
",",
"SimpleValueBooleanQuery",
")",
":",
"return",
"_convert_simple_value_boolean_query_to_and_boolean_queries",
"(",
"value",
",",
"keyword",
")",
"return",
"KeywordOp",
"(",
"keyword",
",",
"value",
")"
] | Transform a :class:`SpiresKeywordQuery` into a :class:`KeywordOp`.
Notes:
In case the value is a :class:`SimpleValueBooleanQuery`, the subtree is transformed to chained
:class:`AndOp` queries containing :class:`KeywordOp`, whose keyword is the keyword of the current node and
whose values are all the :class:`SimpleValueBooleanQuery` values (either :class:`SimpleValues` or
:class:`SimpleValueNegation`). | [
"Transform",
"a",
":",
"class",
":",
"SpiresKeywordQuery",
"into",
"a",
":",
"class",
":",
"KeywordOp",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/restructuring_visitor.py#L206-L221 |
inspirehep/inspire-query-parser | inspire_query_parser/visitors/restructuring_visitor.py | RestructuringVisitor.visit_invenio_keyword_query | def visit_invenio_keyword_query(self, node):
"""Transform an :class:`InvenioKeywordQuery` into a :class:`KeywordOp`.
Notes:
In case the value is a :class:`SimpleValueBooleanQuery`, the subtree is transformed to chained
:class:`AndOp` queries containing :class:`KeywordOp`, whose keyword is the keyword of the current node and
whose values are all the :class:`SimpleValueBooleanQuery` values (either :class:`SimpleValues` or
:class:`SimpleValueNegation`).
"""
try:
keyword = node.left.accept(self)
except AttributeError:
# The keywords whose values aren't an InspireKeyword are simple strings.
keyword = Keyword(node.left)
value = node.right.accept(self)
if isinstance(value, SimpleValueBooleanQuery):
return _convert_simple_value_boolean_query_to_and_boolean_queries(value, keyword)
return KeywordOp(keyword, value) | python | def visit_invenio_keyword_query(self, node):
"""Transform an :class:`InvenioKeywordQuery` into a :class:`KeywordOp`.
Notes:
In case the value is a :class:`SimpleValueBooleanQuery`, the subtree is transformed to chained
:class:`AndOp` queries containing :class:`KeywordOp`, whose keyword is the keyword of the current node and
whose values are all the :class:`SimpleValueBooleanQuery` values (either :class:`SimpleValues` or
:class:`SimpleValueNegation`).
"""
try:
keyword = node.left.accept(self)
except AttributeError:
# The keywords whose values aren't an InspireKeyword are simple strings.
keyword = Keyword(node.left)
value = node.right.accept(self)
if isinstance(value, SimpleValueBooleanQuery):
return _convert_simple_value_boolean_query_to_and_boolean_queries(value, keyword)
return KeywordOp(keyword, value) | [
"def",
"visit_invenio_keyword_query",
"(",
"self",
",",
"node",
")",
":",
"try",
":",
"keyword",
"=",
"node",
".",
"left",
".",
"accept",
"(",
"self",
")",
"except",
"AttributeError",
":",
"# The keywords whose values aren't an InspireKeyword are simple strings.",
"keyword",
"=",
"Keyword",
"(",
"node",
".",
"left",
")",
"value",
"=",
"node",
".",
"right",
".",
"accept",
"(",
"self",
")",
"if",
"isinstance",
"(",
"value",
",",
"SimpleValueBooleanQuery",
")",
":",
"return",
"_convert_simple_value_boolean_query_to_and_boolean_queries",
"(",
"value",
",",
"keyword",
")",
"return",
"KeywordOp",
"(",
"keyword",
",",
"value",
")"
] | Transform an :class:`InvenioKeywordQuery` into a :class:`KeywordOp`.
Notes:
In case the value is a :class:`SimpleValueBooleanQuery`, the subtree is transformed to chained
:class:`AndOp` queries containing :class:`KeywordOp`, whose keyword is the keyword of the current node and
whose values are all the :class:`SimpleValueBooleanQuery` values (either :class:`SimpleValues` or
:class:`SimpleValueNegation`). | [
"Transform",
"an",
":",
"class",
":",
"InvenioKeywordQuery",
"into",
"a",
":",
"class",
":",
"KeywordOp",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/restructuring_visitor.py#L223-L243 |
inspirehep/inspire-query-parser | inspire_query_parser/visitors/restructuring_visitor.py | RestructuringVisitor.visit_complex_value | def visit_complex_value(self, node):
"""Convert :class:`ComplexValue` to one of ExactMatch, PartialMatch and Regex Value nodes."""
if node.value.startswith(ComplexValue.EXACT_VALUE_TOKEN):
value = node.value.strip(ComplexValue.EXACT_VALUE_TOKEN)
return ExactMatchValue(value)
elif node.value.startswith(ComplexValue.PARTIAL_VALUE_TOKEN):
value = node.value.strip(ComplexValue.PARTIAL_VALUE_TOKEN)
return PartialMatchValue(value, True if ast.GenericValue.WILDCARD_TOKEN in value else False)
elif node.value.startswith(ComplexValue.REGEX_VALUE_TOKEN):
return RegexValue(node.value.strip(ComplexValue.REGEX_VALUE_TOKEN))
else:
# Covering the case where ComplexValue supports more than ExactMatch, PartialMatch and Regex values.
msg = self.__class__.__name__ + ': Unrecognized complex value'
try:
msg += ' lookahead token: "' + node.value[0] + '"'
except IndexError:
msg += ': \"' + repr(node.value) + '"'
msg += '.\nUsing simple value instead: "' + node.value + '".'
logger.warning(msg)
return ast.Value(node.value) | python | def visit_complex_value(self, node):
"""Convert :class:`ComplexValue` to one of ExactMatch, PartialMatch and Regex Value nodes."""
if node.value.startswith(ComplexValue.EXACT_VALUE_TOKEN):
value = node.value.strip(ComplexValue.EXACT_VALUE_TOKEN)
return ExactMatchValue(value)
elif node.value.startswith(ComplexValue.PARTIAL_VALUE_TOKEN):
value = node.value.strip(ComplexValue.PARTIAL_VALUE_TOKEN)
return PartialMatchValue(value, True if ast.GenericValue.WILDCARD_TOKEN in value else False)
elif node.value.startswith(ComplexValue.REGEX_VALUE_TOKEN):
return RegexValue(node.value.strip(ComplexValue.REGEX_VALUE_TOKEN))
else:
# Covering the case where ComplexValue supports more than ExactMatch, PartialMatch and Regex values.
msg = self.__class__.__name__ + ': Unrecognized complex value'
try:
msg += ' lookahead token: "' + node.value[0] + '"'
except IndexError:
msg += ': \"' + repr(node.value) + '"'
msg += '.\nUsing simple value instead: "' + node.value + '".'
logger.warning(msg)
return ast.Value(node.value) | [
"def",
"visit_complex_value",
"(",
"self",
",",
"node",
")",
":",
"if",
"node",
".",
"value",
".",
"startswith",
"(",
"ComplexValue",
".",
"EXACT_VALUE_TOKEN",
")",
":",
"value",
"=",
"node",
".",
"value",
".",
"strip",
"(",
"ComplexValue",
".",
"EXACT_VALUE_TOKEN",
")",
"return",
"ExactMatchValue",
"(",
"value",
")",
"elif",
"node",
".",
"value",
".",
"startswith",
"(",
"ComplexValue",
".",
"PARTIAL_VALUE_TOKEN",
")",
":",
"value",
"=",
"node",
".",
"value",
".",
"strip",
"(",
"ComplexValue",
".",
"PARTIAL_VALUE_TOKEN",
")",
"return",
"PartialMatchValue",
"(",
"value",
",",
"True",
"if",
"ast",
".",
"GenericValue",
".",
"WILDCARD_TOKEN",
"in",
"value",
"else",
"False",
")",
"elif",
"node",
".",
"value",
".",
"startswith",
"(",
"ComplexValue",
".",
"REGEX_VALUE_TOKEN",
")",
":",
"return",
"RegexValue",
"(",
"node",
".",
"value",
".",
"strip",
"(",
"ComplexValue",
".",
"REGEX_VALUE_TOKEN",
")",
")",
"else",
":",
"# Covering the case where ComplexValue supports more than ExactMatch, PartialMatch and Regex values.",
"msg",
"=",
"self",
".",
"__class__",
".",
"__name__",
"+",
"': Unrecognized complex value'",
"try",
":",
"msg",
"+=",
"' lookahead token: \"'",
"+",
"node",
".",
"value",
"[",
"0",
"]",
"+",
"'\"'",
"except",
"IndexError",
":",
"msg",
"+=",
"': \\\"'",
"+",
"repr",
"(",
"node",
".",
"value",
")",
"+",
"'\"'",
"msg",
"+=",
"'.\\nUsing simple value instead: \"'",
"+",
"node",
".",
"value",
"+",
"'\".'",
"logger",
".",
"warn",
"(",
"msg",
")",
"return",
"ast",
".",
"Value",
"(",
"node",
".",
"value",
")"
] | Convert :class:`ComplexValue` to one of ExactMatch, PartialMatch and Regex Value nodes. | [
"Convert",
":",
"class",
":",
"ComplexValue",
"to",
"one",
"of",
"ExactMatch",
"PartialMatch",
"and",
"Regex",
"Value",
"nodes",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/restructuring_visitor.py#L281-L302 |
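The three prefixes the method dispatches on follow INSPIRE's value syntax; the sketch below reproduces the dispatch standalone, and the token characters (double quote, single quote, slash) are assumptions based on that syntax rather than values read from the ComplexValue class:

# Standalone dispatch sketch; token characters are assumed, not imported.
EXACT_TOKEN, PARTIAL_TOKEN, REGEX_TOKEN = '"', "'", '/'

def classify_complex_value(value):
    if value.startswith(EXACT_TOKEN):
        return 'exact', value.strip(EXACT_TOKEN)
    if value.startswith(PARTIAL_TOKEN):
        return 'partial', value.strip(PARTIAL_TOKEN)
    if value.startswith(REGEX_TOKEN):
        return 'regex', value.strip(REGEX_TOKEN)
    return 'simple', value   # fallback, mirroring the warning branch above

print(classify_complex_value('"Ellis"'))  # ('exact', 'Ellis')
print(classify_complex_value('/bos.*/'))  # ('regex', 'bos.*')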
jreese/tasky | tasky/tasks/task.py | Task.sleep | async def sleep(self, duration: float=0.0) -> None:
'''Simple wrapper around `asyncio.sleep()`.'''
duration = max(0, duration)
if duration > 0:
Log.debug('sleeping task %s for %.1f seconds', self.name, duration)
await asyncio.sleep(duration) | python | async def sleep(self, duration: float=0.0) -> None:
'''Simple wrapper around `asyncio.sleep()`.'''
duration = max(0, duration)
if duration > 0:
Log.debug('sleeping task %s for %.1f seconds', self.name, duration)
await asyncio.sleep(duration) | [
"async",
"def",
"sleep",
"(",
"self",
",",
"duration",
":",
"float",
"=",
"0.0",
")",
"->",
"None",
":",
"duration",
"=",
"max",
"(",
"0",
",",
"duration",
")",
"if",
"duration",
">",
"0",
":",
"Log",
".",
"debug",
"(",
"'sleeping task %s for %.1f seconds'",
",",
"self",
".",
"name",
",",
"duration",
")",
"await",
"asyncio",
".",
"sleep",
"(",
"duration",
")"
] | Simple wrapper around `asyncio.sleep()`. | [
"Simple",
"wrapper",
"around",
"asyncio",
".",
"sleep",
"()",
"."
] | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/tasks/task.py#L71-L77 |
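A typical consumer of such a wrapper is a periodic loop; the generic asyncio sketch below is not tasky's Task class, just the same idea in miniature:

import asyncio

# Generic periodic-task sketch, not tasky's Task class.
class Ticker:
    def __init__(self, interval: float = 0.1) -> None:
        self.interval = interval
        self.running = True

    async def sleep(self, duration: float = 0.0) -> None:
        await asyncio.sleep(max(0, duration))   # clamp negatives, as above

    async def run(self, ticks: int = 3) -> None:
        for _ in range(ticks):
            if not self.running:
                break
            print('tick')
            await self.sleep(self.interval)

asyncio.run(Ticker().run())   # prints 'tick' three times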
jreese/tasky | tasky/tasks/task.py | Task.stop | async def stop(self, force: bool=False) -> None:
'''Cancel the task if it hasn't yet started, or tell it to
gracefully stop running if it has.'''
Log.debug('stopping task %s', self.name)
self.running = False
if force:
self.task.cancel() | python | async def stop(self, force: bool=False) -> None:
'''Cancel the task if it hasn't yet started, or tell it to
gracefully stop running if it has.'''
Log.debug('stopping task %s', self.name)
self.running = False
if force:
self.task.cancel() | [
"async",
"def",
"stop",
"(",
"self",
",",
"force",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"Log",
".",
"debug",
"(",
"'stopping task %s'",
",",
"self",
".",
"name",
")",
"self",
".",
"running",
"=",
"False",
"if",
"force",
":",
"self",
".",
"task",
".",
"cancel",
"(",
")"
] | Cancel the task if it hasn't yet started, or tell it to
gracefully stop running if it has. | [
"Cancel",
"the",
"task",
"if",
"it",
"hasn",
"t",
"yet",
"started",
"or",
"tell",
"it",
"to",
"gracefully",
"stop",
"running",
"if",
"it",
"has",
"."
] | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/tasks/task.py#L84-L92 |
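The force branch boils down to asyncio task cancellation; in plain asyncio that looks like this (generic sketch, not tasky's own scheduling code):

import asyncio

async def worker():
    await asyncio.sleep(3600)   # stands in for a long-running task body

async def main():
    task = asyncio.ensure_future(worker())
    task.cancel()               # analogous to self.task.cancel() above
    try:
        await task
    except asyncio.CancelledError:
        print('worker cancelled')

asyncio.run(main())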
inspirehep/inspire-query-parser | inspire_query_parser/utils/visitor_utils.py | author_name_contains_fullnames | def author_name_contains_fullnames(author_name):
"""Recognizes whether the name contains full name parts and not initials or only lastname.
Returns:
bool: True if name has only full name parts, e.g. 'Ellis John', False otherwise. So for example, False is
returned for 'Ellis, J.' or 'Ellis'.
"""
def _is_initial(name_part):
return len(name_part) == 1 or u'.' in name_part
parsed_name = ParsedName(author_name)
if len(parsed_name) == 1:
return False
elif any([_is_initial(name_part) for name_part in parsed_name]):
return False
return True | python | def author_name_contains_fullnames(author_name):
"""Recognizes whether the name contains full name parts and not initials or only lastname.
Returns:
bool: True if name has only full name parts, e.g. 'Ellis John', False otherwise. So for example, False is
returned for 'Ellis, J.' or 'Ellis'.
"""
def _is_initial(name_part):
return len(name_part) == 1 or u'.' in name_part
parsed_name = ParsedName(author_name)
if len(parsed_name) == 1:
return False
elif any([_is_initial(name_part) for name_part in parsed_name]):
return False
return True | [
"def",
"author_name_contains_fullnames",
"(",
"author_name",
")",
":",
"def",
"_is_initial",
"(",
"name_part",
")",
":",
"return",
"len",
"(",
"name_part",
")",
"==",
"1",
"or",
"u'.'",
"in",
"name_part",
"parsed_name",
"=",
"ParsedName",
"(",
"author_name",
")",
"if",
"len",
"(",
"parsed_name",
")",
"==",
"1",
":",
"return",
"False",
"elif",
"any",
"(",
"[",
"_is_initial",
"(",
"name_part",
")",
"for",
"name_part",
"in",
"parsed_name",
"]",
")",
":",
"return",
"False",
"return",
"True"
] | Recognizes whether the name contains full name parts and not initials or only lastname.
Returns:
bool: True if name has only full name parts, e.g. 'Ellis John', False otherwise. So for example, False is
returned for 'Ellis, J.' or 'Ellis'. | [
"Recognizes",
"whether",
"the",
"name",
"contains",
"full",
"name",
"parts",
"and",
"not",
"initials",
"or",
"only",
"lastname",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/utils/visitor_utils.py#L45-L62 |
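Expected behaviour, restated from the docstring as a usage sketch; running it requires inspire_utils, which provides ParsedName:

# Usage sketch; the outputs restate the docstring's own examples.
assert author_name_contains_fullnames('Ellis John') is True
assert author_name_contains_fullnames('Ellis, J.') is False   # contains an initial
assert author_name_contains_fullnames('Ellis') is False       # lastname only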
inspirehep/inspire-query-parser | inspire_query_parser/utils/visitor_utils.py | _name_variation_has_only_initials | def _name_variation_has_only_initials(name):
"""Detects whether the name variation consists only from initials."""
def _is_initial(name_variation):
return len(name_variation) == 1 or u'.' in name_variation
parsed_name = ParsedName.loads(name)
return all([_is_initial(name_part) for name_part in parsed_name]) | python | def _name_variation_has_only_initials(name):
"""Detects whether the name variation consists only from initials."""
def _is_initial(name_variation):
return len(name_variation) == 1 or u'.' in name_variation
parsed_name = ParsedName.loads(name)
return all([_is_initial(name_part) for name_part in parsed_name]) | [
"def",
"_name_variation_has_only_initials",
"(",
"name",
")",
":",
"def",
"_is_initial",
"(",
"name_variation",
")",
":",
"return",
"len",
"(",
"name_variation",
")",
"==",
"1",
"or",
"u'.'",
"in",
"name_variation",
"parsed_name",
"=",
"ParsedName",
".",
"loads",
"(",
"name",
")",
"return",
"all",
"(",
"[",
"_is_initial",
"(",
"name_part",
")",
"for",
"name_part",
"in",
"parsed_name",
"]",
")"
] | Detects whether the name variation consists only of initials. | [
"Detects",
"whether",
"the",
"name",
"variation",
"consists",
"only",
"from",
"initials",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/utils/visitor_utils.py#L65-L72 |
inspirehep/inspire-query-parser | inspire_query_parser/utils/visitor_utils.py | generate_minimal_name_variations | def generate_minimal_name_variations(author_name):
"""Generate a small number of name variations.
Notes:
Unidecodes the name, so that we use its transliterated version, since this is how the field is being indexed.
For names with more than one part, it generates {lastname} x {non lastnames, non lastnames initial} variations.
Additionally, it generates the swapped version of those, for supporting queries like ``Mele Salvatore`` which
``ParsedName`` parses as lastname: Salvatore and firstname: Mele. So in those cases, we need to generate both
``Mele, Salvatore`` and ``Mele, S``.
Wherever the '-' is replaced by ' ', it's done because that's the way the name variations are being indexed, thus
we want our minimal name variations to be generated identically. This has to be done after the creation of
ParsedName, otherwise the name is parsed differently. E.g. 'Caro-Estevez' as is, it's a lastname; if we replace
the '-' with ' ', then it's a firstname and lastname.
"""
parsed_name = ParsedName.loads(unidecode(author_name))
if len(parsed_name) > 1:
lastnames = parsed_name.last.replace('-', ' ')
non_lastnames = ' '.join(
parsed_name.first_list + parsed_name.suffix_list
)
# Strip extra whitespace added if either first_list or suffix_list is empty.
non_lastnames = non_lastnames.strip().replace('-', ' ')
# Adding into a set first, so as to drop identical name variations.
return list({
name_variation.lower()
for name_variation
in [
lastnames + ' ' + non_lastnames,
lastnames + ' ' + non_lastnames[0],
non_lastnames + ' ' + lastnames,
non_lastnames + ' ' + lastnames[0],
]
if not _name_variation_has_only_initials(name_variation)
})
else:
return [parsed_name.dumps().replace('-', ' ').lower()] | python | def generate_minimal_name_variations(author_name):
"""Generate a small number of name variations.
Notes:
Unidecodes the name, so that we use its transliterated version, since this is how the field is being indexed.
For names with more than one part, it generates {lastname} x {non lastnames, non lastnames initial} variations.
Additionally, it generates the swapped version of those, for supporting queries like ``Mele Salvatore`` which
``ParsedName`` parses as lastname: Salvatore and firstname: Mele. So in those cases, we need to generate both
``Mele, Salvatore`` and ``Mele, S``.
Wherever the '-' is replaced by ' ', it's done because that's the way the name variations are being indexed, thus
we want our minimal name variations to be generated identically. This has to be done after the creation of
ParsedName, otherwise the name is parsed differently. E.g. 'Caro-Estevez' as is, it's a lastname; if we replace
the '-' with ' ', then it's a firstname and lastname.
"""
parsed_name = ParsedName.loads(unidecode(author_name))
if len(parsed_name) > 1:
lastnames = parsed_name.last.replace('-', ' ')
non_lastnames = ' '.join(
parsed_name.first_list + parsed_name.suffix_list
)
# Strip extra whitespace added if either first_list or suffix_list is empty.
non_lastnames = non_lastnames.strip().replace('-', ' ')
# Adding into a set first, so as to drop identical name variations.
return list({
name_variation.lower()
for name_variation
in [
lastnames + ' ' + non_lastnames,
lastnames + ' ' + non_lastnames[0],
non_lastnames + ' ' + lastnames,
non_lastnames + ' ' + lastnames[0],
]
if not _name_variation_has_only_initials(name_variation)
})
else:
return [parsed_name.dumps().replace('-', ' ').lower()] | [
"def",
"generate_minimal_name_variations",
"(",
"author_name",
")",
":",
"parsed_name",
"=",
"ParsedName",
".",
"loads",
"(",
"unidecode",
"(",
"author_name",
")",
")",
"if",
"len",
"(",
"parsed_name",
")",
">",
"1",
":",
"lastnames",
"=",
"parsed_name",
".",
"last",
".",
"replace",
"(",
"'-'",
",",
"' '",
")",
"non_lastnames",
"=",
"' '",
".",
"join",
"(",
"parsed_name",
".",
"first_list",
"+",
"parsed_name",
".",
"suffix_list",
")",
"# Strip extra whitespace added if any of middle_list and suffix_list are empty.",
"non_lastnames",
"=",
"non_lastnames",
".",
"strip",
"(",
")",
".",
"replace",
"(",
"'-'",
",",
"' '",
")",
"# Adding into a set first, so as to drop identical name variations.",
"return",
"list",
"(",
"{",
"name_variation",
".",
"lower",
"(",
")",
"for",
"name_variation",
"in",
"[",
"lastnames",
"+",
"' '",
"+",
"non_lastnames",
",",
"lastnames",
"+",
"' '",
"+",
"non_lastnames",
"[",
"0",
"]",
",",
"non_lastnames",
"+",
"' '",
"+",
"lastnames",
",",
"non_lastnames",
"+",
"' '",
"+",
"lastnames",
"[",
"0",
"]",
",",
"]",
"if",
"not",
"_name_variation_has_only_initials",
"(",
"name_variation",
")",
"}",
")",
"else",
":",
"return",
"[",
"parsed_name",
".",
"dumps",
"(",
")",
".",
"replace",
"(",
"'-'",
",",
"' '",
")",
".",
"lower",
"(",
")",
"]"
] | Generate a small number of name variations.
Notes:
Unidecodes the name, so that we use its transliterated version, since this is how the field is being indexed.
For names with more than one part, it generates {lastname} x {non lastnames, non lastnames initial} variations.
Additionally, it generates the swapped version of those, for supporting queries like ``Mele Salvatore`` which
``ParsedName`` parses as lastname: Salvatore and firstname: Mele. So in those cases, we need to generate both
``Mele, Salvatore`` and ``Mele, S``.
Wherever the '-' is replaced by ' ', it's done because that's the way the name variations are being indexed, thus
we want our minimal name variations to be generated identically. This has to be done after the creation of
ParsedName, otherwise the name is parsed differently. E.g. 'Caro-Estevez' as is, it's a lastname; if we replace
the '-' with ' ', then it's a firstname and lastname. | [
"Generate",
"a",
"small",
"number",
"of",
"name",
"variations",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/utils/visitor_utils.py#L75-L115 |
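A usage sketch built from the docstring's 'Mele Salvatore' example; the expected set is an inference from the rules described above, not an output copied from the package:

variations = set(generate_minimal_name_variations('Mele Salvatore'))
# Expected, per the rules above (lowercased, initials-only variations dropped):
# {'mele salvatore', 'mele s', 'salvatore mele', 'salvatore m'}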
inspirehep/inspire-query-parser | inspire_query_parser/utils/visitor_utils.py | register_date_conversion_handler | def register_date_conversion_handler(date_specifier_patterns):
"""Decorator for registering handlers that convert text dates to dates.
Args:
date_specifier_patterns (str): the date specifier (in regex pattern format) for which the handler is registered
"""
def _decorator(func):
global DATE_SPECIFIERS_CONVERSION_HANDLERS
DATE_SPECIFIERS_CONVERSION_HANDLERS[DATE_SPECIFIERS_REGEXES[date_specifier_patterns]] = func
return func
return _decorator | python | def register_date_conversion_handler(date_specifier_patterns):
"""Decorator for registering handlers that convert text dates to dates.
Args:
date_specifier_patterns (str): the date specifier (in regex pattern format) for which the handler is registered
"""
def _decorator(func):
global DATE_SPECIFIERS_CONVERSION_HANDLERS
DATE_SPECIFIERS_CONVERSION_HANDLERS[DATE_SPECIFIERS_REGEXES[date_specifier_patterns]] = func
return func
return _decorator | [
"def",
"register_date_conversion_handler",
"(",
"date_specifier_patterns",
")",
":",
"def",
"_decorator",
"(",
"func",
")",
":",
"global",
"DATE_SPECIFIERS_CONVERSION_HANDLERS",
"DATE_SPECIFIERS_CONVERSION_HANDLERS",
"[",
"DATE_SPECIFIERS_REGEXES",
"[",
"date_specifier_patterns",
"]",
"]",
"=",
"func",
"return",
"func",
"return",
"_decorator"
] | Decorator for registering handlers that convert text dates to dates.
Args:
date_specifier_patterns (str): the date specifier (in regex pattern format) for which the handler is registered | [
"Decorator",
"for",
"registering",
"handlers",
"that",
"convert",
"text",
"dates",
"to",
"dates",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/utils/visitor_utils.py#L139-L151 |
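The decorator is a registry pattern; the same pattern, self-contained and with hypothetical names (independent of the package's DATE_SPECIFIERS_REGEXES table):

# Self-contained registry-decorator sketch; all names here are hypothetical.
DATE_HANDLERS = {}

def register_handler(pattern):
    def _decorator(func):
        DATE_HANDLERS[pattern] = func
        return func
    return _decorator

@register_handler('yesterday')
def convert_yesterday(suffix):
    return 'date-for-yesterday'   # placeholder conversion

print(DATE_HANDLERS['yesterday'](''))   # handlers are looked up by pattern key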
inspirehep/inspire-query-parser | inspire_query_parser/utils/visitor_utils.py | _truncate_wildcard_from_date | def _truncate_wildcard_from_date(date_value):
"""Truncate wildcard from date parts.
Returns:
(str) The truncated date.
Raises:
ValueError, on either unsupported date separator (currently only ' ' and '-' are supported), or if there's a
wildcard in the year.
Notes:
Either the whole date part is a wildcard, in which case we ignore it and do a range query on the
remaining parts, or some numbers are wildcards, where again, we ignore this part.
"""
if ' ' in date_value:
date_parts = date_value.split(' ')
elif '-' in date_value:
date_parts = date_value.split('-')
else:
# Either unsupported separators or wildcard in year, e.g. '201*'.
raise ValueError("Erroneous date value: %s." % date_value)
if GenericValue.WILDCARD_TOKEN in date_parts[-1]:
del date_parts[-1]
return '-'.join(date_parts) | python | def _truncate_wildcard_from_date(date_value):
"""Truncate wildcard from date parts.
Returns:
(str) The truncated date.
Raises:
ValueError, on either unsupported date separator (currently only ' ' and '-' are supported), or if there's a
wildcard in the year.
Notes:
Either the whole date part is a wildcard, in which case we ignore it and do a range query on the
remaining parts, or some numbers are wildcards, where again, we ignore this part.
"""
if ' ' in date_value:
date_parts = date_value.split(' ')
elif '-' in date_value:
date_parts = date_value.split('-')
else:
# Either unsupported separators or wildcard in year, e.g. '201*'.
raise ValueError("Erroneous date value: %s." % date_value)
if GenericValue.WILDCARD_TOKEN in date_parts[-1]:
del date_parts[-1]
return '-'.join(date_parts) | [
"def",
"_truncate_wildcard_from_date",
"(",
"date_value",
")",
":",
"if",
"' '",
"in",
"date_value",
":",
"date_parts",
"=",
"date_value",
".",
"split",
"(",
"' '",
")",
"elif",
"'-'",
"in",
"date_value",
":",
"date_parts",
"=",
"date_value",
".",
"split",
"(",
"'-'",
")",
"else",
":",
"# Either unsupported separators or wildcard in year, e.g. '201*'.",
"raise",
"ValueError",
"(",
"\"Erroneous date value: %s.\"",
",",
"date_value",
")",
"if",
"GenericValue",
".",
"WILDCARD_TOKEN",
"in",
"date_parts",
"[",
"-",
"1",
"]",
":",
"del",
"date_parts",
"[",
"-",
"1",
"]",
"return",
"'-'",
".",
"join",
"(",
"date_parts",
")"
] | Truncate wildcard from date parts.
Returns:
(str) The truncated date.
Raises:
ValueError, on either unsupported date separator (currently only ' ' and '-' are supported), or if there's a
wildcard in the year.
Notes:
Either the whole date part is a wildcard, in which case we ignore it and do a range query on the
remaining parts, or some numbers are wildcards, where again, we ignore this part. | [
"Truncate",
"wildcard",
"from",
"date",
"parts",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/utils/visitor_utils.py#L226-L251 |
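The truncation rule restated as a runnable standalone sketch, simplified to the two supported separators:

# Standalone illustration of the truncation rule; not the package's own code.
def truncate_wildcard(date_value, wildcard='*'):
    sep = ' ' if ' ' in date_value else '-'
    if sep not in date_value:
        # No separator means either an unsupported format or a wildcard year.
        raise ValueError('Erroneous date value: %s.' % date_value)
    parts = date_value.split(sep)
    if wildcard in parts[-1]:
        parts = parts[:-1]
    return '-'.join(parts)

print(truncate_wildcard('2017-10-*'))   # -> '2017-10'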
inspirehep/inspire-query-parser | inspire_query_parser/utils/visitor_utils.py | _truncate_date_value_according_on_date_field | def _truncate_date_value_according_on_date_field(field, date_value):
"""Truncates date value (to year only) according to the given date field.
Args:
field (unicode): The field for which the date value will be used to query on.
date_value (str): The date value that is going to be truncated to its year.
Returns:
PartialDate: The possibly truncated date, on success. None, otherwise.
Notes:
In case the fieldname is in `ES_MAPPING_HEP_DATE_ONLY_YEAR`, then the date is normalized and then only its year
value is used. This is needed for ElasticSearch to be able to do comparisons on dates that have only year, which
fails if queried with a date of finer precision.
"""
try:
partial_date = PartialDate.parse(date_value)
except ValueError:
return None
if field in ES_MAPPING_HEP_DATE_ONLY_YEAR:
truncated_date = PartialDate.from_parts(partial_date.year)
else:
truncated_date = partial_date
return truncated_date | python | def _truncate_date_value_according_on_date_field(field, date_value):
"""Truncates date value (to year only) according to the given date field.
Args:
field (unicode): The field for which the date value will be used to query on.
date_value (str): The date value that is going to be truncated to its year.
Returns:
PartialDate: The possibly truncated date, on success. None, otherwise.
Notes:
In case the fieldname is in `ES_MAPPING_HEP_DATE_ONLY_YEAR`, then the date is normalized and then only its year
value is used. This is needed for ElasticSearch to be able to do comparisons on dates that have only year, which
fails if queried with a date of finer precision.
"""
try:
partial_date = PartialDate.parse(date_value)
except ValueError:
return None
if field in ES_MAPPING_HEP_DATE_ONLY_YEAR:
truncated_date = PartialDate.from_parts(partial_date.year)
else:
truncated_date = partial_date
return truncated_date | [
"def",
"_truncate_date_value_according_on_date_field",
"(",
"field",
",",
"date_value",
")",
":",
"try",
":",
"partial_date",
"=",
"PartialDate",
".",
"parse",
"(",
"date_value",
")",
"except",
"ValueError",
":",
"return",
"None",
"if",
"field",
"in",
"ES_MAPPING_HEP_DATE_ONLY_YEAR",
":",
"truncated_date",
"=",
"PartialDate",
".",
"from_parts",
"(",
"partial_date",
".",
"year",
")",
"else",
":",
"truncated_date",
"=",
"partial_date",
"return",
"truncated_date"
] | Truncates date value (to year only) according to the given date field.
Args:
field (unicode): The field for which the date value will be used to query on.
date_value (str): The date value that is going to be truncated to its year.
Returns:
PartialDate: The possibly truncated date, on success. None, otherwise.
Notes:
In case the fieldname is in `ES_MAPPING_HEP_DATE_ONLY_YEAR`, then the date is normalized and then only its year
value is used. This is needed for ElasticSearch to be able to do comparisons on dates that have only year, which
fails if queried with a date of finer precision. | [
"Truncates",
"date",
"value",
"(",
"to",
"year",
"only",
")",
"according",
"to",
"the",
"given",
"date",
"field",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/utils/visitor_utils.py#L254-L279 |
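A usage sketch of the year-only truncation, assuming inspire_utils is installed (it provides PartialDate, as used by this module) and that the queried field is one of the year-only mapped fields:

from inspire_utils.date import PartialDate

full = PartialDate.parse('2017-10-15')
year_only = PartialDate.from_parts(full.year)
print(year_only.dumps())   # '2017' - what a year-only mapped field would get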
inspirehep/inspire-query-parser | inspire_query_parser/utils/visitor_utils.py | _get_next_date_from_partial_date | def _get_next_date_from_partial_date(partial_date):
"""Calculates the next date from the given partial date.
Args:
partial_date (inspire_utils.date.PartialDate): The partial date whose next date should be calculated.
Returns:
PartialDate: The next date from the given partial date.
"""
relativedelta_arg = 'years'
if partial_date.month:
relativedelta_arg = 'months'
if partial_date.day:
relativedelta_arg = 'days'
next_date = parse(partial_date.dumps()) + relativedelta(**{relativedelta_arg: 1})
return PartialDate.from_parts(
next_date.year,
next_date.month if partial_date.month else None,
next_date.day if partial_date.day else None
) | python | def _get_next_date_from_partial_date(partial_date):
"""Calculates the next date from the given partial date.
Args:
partial_date (inspire_utils.date.PartialDate): The partial date whose next date should be calculated.
Returns:
PartialDate: The next date from the given partial date.
"""
relativedelta_arg = 'years'
if partial_date.month:
relativedelta_arg = 'months'
if partial_date.day:
relativedelta_arg = 'days'
next_date = parse(partial_date.dumps()) + relativedelta(**{relativedelta_arg: 1})
return PartialDate.from_parts(
next_date.year,
next_date.month if partial_date.month else None,
next_date.day if partial_date.day else None
) | [
"def",
"_get_next_date_from_partial_date",
"(",
"partial_date",
")",
":",
"relativedelta_arg",
"=",
"'years'",
"if",
"partial_date",
".",
"month",
":",
"relativedelta_arg",
"=",
"'months'",
"if",
"partial_date",
".",
"day",
":",
"relativedelta_arg",
"=",
"'days'",
"next_date",
"=",
"parse",
"(",
"partial_date",
".",
"dumps",
"(",
")",
")",
"+",
"relativedelta",
"(",
"*",
"*",
"{",
"relativedelta_arg",
":",
"1",
"}",
")",
"return",
"PartialDate",
".",
"from_parts",
"(",
"next_date",
".",
"year",
",",
"next_date",
".",
"month",
"if",
"partial_date",
".",
"month",
"else",
"None",
",",
"next_date",
".",
"day",
"if",
"partial_date",
".",
"day",
"else",
"None",
")"
] | Calculates the next date from the given partial date.
Args:
partial_date (inspire_utils.date.PartialDate): The partial date whose next date should be calculated.
Returns:
PartialDate: The next date from the given partial date. | [
"Calculates",
"the",
"next",
"date",
"from",
"the",
"given",
"partial",
"date",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/utils/visitor_utils.py#L282-L303 |
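The stepping rule uses only dateutil, so it can be sketched without the PartialDate wrapper; the finest part present in the date picks the increment unit:

from dateutil.parser import parse
from dateutil.relativedelta import relativedelta

# '2010-11' carries no day, so its next date is one month later.
print(parse('2010-11-01') + relativedelta(months=1))   # 2010-12-01 00:00:00
# '2010-11-15' carries a day, so its next date is one day later.
print(parse('2010-11-15') + relativedelta(days=1))     # 2010-11-16 00:00:00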
inspirehep/inspire-query-parser | inspire_query_parser/utils/visitor_utils.py | _get_proper_elastic_search_date_rounding_format | def _get_proper_elastic_search_date_rounding_format(partial_date):
"""Returns the proper ES date math unit according to the "resolution" of the partial_date.
Args:
partial_date (PartialDate): The partial date for which the date math unit is.
Returns:
(str): The ES date math unit format.
Notes:
This is needed for supporting range queries on dates, i.e. rounding them up or down according to
the ES range operator.
For example, without this, a query like 'date > 2010-11', would return documents with date '2010-11-15', due to
the date value of the query being interpreted by ES as '2010-11-01 01:00:00'. By using the suffixes for rounding
up or down, the date value of the query is interpreted as '2010-11-30T23:59:59.999', thus not returning the
document with date '2010-11-15', as the user would expect. See:
https://www.elastic.co/guide/en/elasticsearch/reference/6.1/query-dsl-range-query.html#_date_math_and_rounding
"""
es_date_math_unit = ES_DATE_MATH_ROUNDING_YEAR
if partial_date.month:
es_date_math_unit = ES_DATE_MATH_ROUNDING_MONTH
if partial_date.day:
es_date_math_unit = ES_DATE_MATH_ROUNDING_DAY
return es_date_math_unit | python | def _get_proper_elastic_search_date_rounding_format(partial_date):
"""Returns the proper ES date math unit according to the "resolution" of the partial_date.
Args:
partial_date (PartialDate): The partial date for which the date math unit is.
Returns:
(str): The ES date math unit format.
Notes:
This is needed for supporting range queries on dates, i.e. rounding them up or down according to
the ES range operator.
For example, without this, a query like 'date > 2010-11', would return documents with date '2010-11-15', due to
the date value of the query being interpreted by ES as '2010-11-01 01:00:00'. By using the suffixes for rounding
up or down, the date value of the query is interpreted as '2010-11-30T23:59:59.999', thus not returning the
document with date '2010-11-15', as the user would expect. See:
https://www.elastic.co/guide/en/elasticsearch/reference/6.1/query-dsl-range-query.html#_date_math_and_rounding
"""
es_date_math_unit = ES_DATE_MATH_ROUNDING_YEAR
if partial_date.month:
es_date_math_unit = ES_DATE_MATH_ROUNDING_MONTH
if partial_date.day:
es_date_math_unit = ES_DATE_MATH_ROUNDING_DAY
return es_date_math_unit | [
"def",
"_get_proper_elastic_search_date_rounding_format",
"(",
"partial_date",
")",
":",
"es_date_math_unit",
"=",
"ES_DATE_MATH_ROUNDING_YEAR",
"if",
"partial_date",
".",
"month",
":",
"es_date_math_unit",
"=",
"ES_DATE_MATH_ROUNDING_MONTH",
"if",
"partial_date",
".",
"day",
":",
"es_date_math_unit",
"=",
"ES_DATE_MATH_ROUNDING_DAY",
"return",
"es_date_math_unit"
] | Returns the proper ES date math unit according to the "resolution" of the partial_date.
Args:
partial_date (PartialDate): The partial date for which the date math unit is.
Returns:
(str): The ES date math unit format.
Notes:
This is needed for supporting range queries on dates, i.e. rounding them up or down according to
the ES range operator.
For example, without this, a query like 'date > 2010-11', would return documents with date '2010-11-15', due to
the date value of the query being interpreted by ES as '2010-11-01 01:00:00'. By using the suffixes for rounding
up or down, the date value of the query is interpreted as '2010-11-30T23:59:59.999', thus not returning the
document with date '2010-11-15', as the user would expect. See:
https://www.elastic.co/guide/en/elasticsearch/reference/6.1/query-dsl-range-query.html#_date_math_and_rounding | [
"Returns",
"the",
"proper",
"ES",
"date",
"math",
"unit",
"according",
"to",
"the",
"resolution",
"of",
"the",
"partial_date",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/utils/visitor_utils.py#L306-L331 |
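In the generated query the unit is appended to the date as an ES date-math suffix; the clause below is illustrative, and both the field name 'earliest_date' and the exact '||/M' string are assumptions, since the constants' values are not shown here:

# Illustrative ES range clause with date-math rounding applied.
query = {
    'range': {
        'earliest_date': {
            'gt': '2010-11||/M'   # rounded up to the end of November 2010
        }
    }
}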
inspirehep/inspire-query-parser | inspire_query_parser/utils/visitor_utils.py | update_date_value_in_operator_value_pairs_for_fieldname | def update_date_value_in_operator_value_pairs_for_fieldname(field, operator_value_pairs):
"""Updates (operator, date value) pairs by normalizing the date value according to the given field.
Args:
field (unicode): The fieldname for which the operator-value pairs are being generated.
operator_value_pairs (dict): ES range operator {'gt', 'gte', 'lt', 'lte'} along with a value.
Additionally, if the operator is ``ES_RANGE_EQ_OPERATOR``, then it is indicated that the method should
generate both a lower and an upper bound operator-value pair, with the given date_value.
Notes:
On a ``ValueError`` an empty operator_value_pairs is returned.
"""
updated_operator_value_pairs = {}
for operator, value in operator_value_pairs.items():
modified_date = _truncate_date_value_according_on_date_field(field, value)
if not modified_date:
return {}
if operator == ES_RANGE_EQ_OPERATOR:
updated_operator_value_pairs['gte'] = \
modified_date.dumps() + _get_proper_elastic_search_date_rounding_format(modified_date)
next_date = _get_next_date_from_partial_date(modified_date)
updated_operator_value_pairs['lt'] = \
next_date.dumps() + _get_proper_elastic_search_date_rounding_format(next_date)
else:
updated_operator_value_pairs[operator] = \
modified_date.dumps() + _get_proper_elastic_search_date_rounding_format(modified_date)
return updated_operator_value_pairs | python | def update_date_value_in_operator_value_pairs_for_fieldname(field, operator_value_pairs):
"""Updates (operator, date value) pairs by normalizing the date value according to the given field.
Args:
field (unicode): The fieldname for which the operator-value pairs are being generated.
operator_value_pairs (dict): ES range operator {'gt', 'gte', 'lt', 'lte'} along with a value.
Additionally, if the operator is ``ES_RANGE_EQ_OPERATOR``, then it is indicated that the method should
generate both a lower and an upper bound operator-value pair, with the given date_value.
Notes:
On a ``ValueError`` an empty operator_value_pairs is returned.
"""
updated_operator_value_pairs = {}
for operator, value in operator_value_pairs.items():
modified_date = _truncate_date_value_according_on_date_field(field, value)
if not modified_date:
return {}
if operator == ES_RANGE_EQ_OPERATOR:
updated_operator_value_pairs['gte'] = \
modified_date.dumps() + _get_proper_elastic_search_date_rounding_format(modified_date)
next_date = _get_next_date_from_partial_date(modified_date)
updated_operator_value_pairs['lt'] = \
next_date.dumps() + _get_proper_elastic_search_date_rounding_format(next_date)
else:
updated_operator_value_pairs[operator] = \
modified_date.dumps() + _get_proper_elastic_search_date_rounding_format(modified_date)
return updated_operator_value_pairs | [
"def",
"update_date_value_in_operator_value_pairs_for_fieldname",
"(",
"field",
",",
"operator_value_pairs",
")",
":",
"updated_operator_value_pairs",
"=",
"{",
"}",
"for",
"operator",
",",
"value",
"in",
"operator_value_pairs",
".",
"items",
"(",
")",
":",
"modified_date",
"=",
"_truncate_date_value_according_on_date_field",
"(",
"field",
",",
"value",
")",
"if",
"not",
"modified_date",
":",
"return",
"{",
"}",
"if",
"operator",
"==",
"ES_RANGE_EQ_OPERATOR",
":",
"updated_operator_value_pairs",
"[",
"'gte'",
"]",
"=",
"modified_date",
".",
"dumps",
"(",
")",
"+",
"_get_proper_elastic_search_date_rounding_format",
"(",
"modified_date",
")",
"next_date",
"=",
"_get_next_date_from_partial_date",
"(",
"modified_date",
")",
"updated_operator_value_pairs",
"[",
"'lt'",
"]",
"=",
"next_date",
".",
"dumps",
"(",
")",
"+",
"_get_proper_elastic_search_date_rounding_format",
"(",
"next_date",
")",
"else",
":",
"updated_operator_value_pairs",
"[",
"operator",
"]",
"=",
"modified_date",
".",
"dumps",
"(",
")",
"+",
"_get_proper_elastic_search_date_rounding_format",
"(",
"modified_date",
")",
"return",
"updated_operator_value_pairs"
] | Updates (operator, date value) pairs by normalizing the date value according to the given field.
Args:
field (unicode): The fieldname for which the operator-value pairs are being generated.
operator_value_pairs (dict): ES range operator {'gt', 'gte', 'lt', 'lte'} along with a value.
Additionally, if the operator is ``ES_RANGE_EQ_OPERATOR``, the method generates both a lower and an
upper bound operator-value pair with the given date_value.
Notes:
On a ``ValueError``, an empty operator_value_pairs dict is returned. | [
"Updates",
"(",
"operator",
"date",
"value",
")",
"pairs",
"by",
"normalizing",
"the",
"date",
"value",
"according",
"to",
"the",
"given",
"field",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/utils/visitor_utils.py#L334-L363 |
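
A usage sketch for the helper above. The private date helpers are not shown here, so the concrete values in the output comment are assumptions based on their names and on Elasticsearch date-math rounding suffixes such as '||/y':

# Hypothetical call; ES_RANGE_EQ_OPERATOR marks an '=' comparison that the
# method expands into a [gte, lt) range covering the whole partial date.
pairs = update_date_value_in_operator_value_pairs_for_fieldname(
    'earliest_date', {ES_RANGE_EQ_OPERATOR: '2018'})
# Assumed result for a year-precision partial date:
# {'gte': '2018||/y', 'lt': '2019||/y'}
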
inspirehep/inspire-query-parser | inspire_query_parser/utils/visitor_utils.py | generate_match_query | def generate_match_query(field, value, with_operator_and):
"""Helper for generating a match query.
Args:
field (six.text_type): The ES field to be queried.
value (six.text_type/bool): The value of the query (bool for the case of type-code query ["core: true"]).
with_operator_and (bool): Flag that signifies whether to generate the explicit notation of the query, along
with '"operator": "and"', so that all tokens of the query value are required to match.
Notes:
If value is an instance of bool, the shortened version of the match query is always generated.
"""
parsed_value = None
try:
parsed_value = json.loads(value.lower())
except (ValueError, TypeError, AttributeError):
# Catch all possible exceptions
# we do not care which ones occur
pass
if isinstance(value, bool):
return {'match': {field: value}}
elif isinstance(parsed_value, bool):
return {'match': {field: value.lower()}}
if with_operator_and:
return {
'match': {
field: {
'query': value,
'operator': 'and'
}
}
}
return {'match': {field: value}} | python | def generate_match_query(field, value, with_operator_and):
"""Helper for generating a match query.
Args:
field (six.text_type): The ES field to be queried.
value (six.text_type/bool): The value of the query (bool for the case of type-code query ["core: true"]).
with_operator_and (bool): Flag that signifies whether to generate the explicit notation of the query, along
with '"operator": "and"', so that all tokens of the query value are required to match.
Notes:
If value is an instance of bool, the shortened version of the match query is always generated.
"""
parsed_value = None
try:
parsed_value = json.loads(value.lower())
except (ValueError, TypeError, AttributeError):
# Catch all possible exceptions
# we do not care which ones occur
pass
if isinstance(value, bool):
return {'match': {field: value}}
elif isinstance(parsed_value, bool):
return {'match': {field: value.lower()}}
if with_operator_and:
return {
'match': {
field: {
'query': value,
'operator': 'and'
}
}
}
return {'match': {field: value}} | [
"def",
"generate_match_query",
"(",
"field",
",",
"value",
",",
"with_operator_and",
")",
":",
"parsed_value",
"=",
"None",
"try",
":",
"parsed_value",
"=",
"json",
".",
"loads",
"(",
"value",
".",
"lower",
"(",
")",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
",",
"AttributeError",
")",
":",
"# Catch all possible exceptions",
"# we are not interested if they will appear",
"pass",
"if",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"return",
"{",
"'match'",
":",
"{",
"field",
":",
"value",
"}",
"}",
"elif",
"isinstance",
"(",
"parsed_value",
",",
"bool",
")",
":",
"return",
"{",
"'match'",
":",
"{",
"field",
":",
"value",
".",
"lower",
"(",
")",
"}",
"}",
"if",
"with_operator_and",
":",
"return",
"{",
"'match'",
":",
"{",
"field",
":",
"{",
"'query'",
":",
"value",
",",
"'operator'",
":",
"'and'",
"}",
"}",
"}",
"return",
"{",
"'match'",
":",
"{",
"field",
":",
"value",
"}",
"}"
] | Helper for generating a match query.
Args:
field (six.text_type): The ES field to be queried.
value (six.text_type/bool): The value of the query (bool for the case of type-code query ["core: true"]).
with_operator_and (bool): Flag that signifies whether to generate the explicit notation of the query, along
with '"operator": "and"', so that all tokens of the query value are required to match.
Notes:
If value is an instance of bool, the shortened version of the match query is always generated. | [
"Helper",
"for",
"generating",
"a",
"match",
"query",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/utils/visitor_utils.py#L367-L402 |
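
A few illustrative calls; the expected results are read directly off the source above:

generate_match_query('title', 'quark physics', with_operator_and=True)
# -> {'match': {'title': {'query': 'quark physics', 'operator': 'and'}}}

generate_match_query('core', 'TRUE', with_operator_and=True)
# -> {'match': {'core': 'true'}}  # json.loads parsed a bool, so the short form is used

generate_match_query('core', True, with_operator_and=False)
# -> {'match': {'core': True}}
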
inspirehep/inspire-query-parser | inspire_query_parser/utils/visitor_utils.py | wrap_queries_in_bool_clauses_if_more_than_one | def wrap_queries_in_bool_clauses_if_more_than_one(queries,
use_must_clause,
preserve_bool_semantics_if_one_clause=False):
"""Helper for wrapping a list of queries into a bool.{must, should} clause.
Args:
queries (list): List of queries to be wrapped in a bool.{must, should} clause.
use_must_clause (bool): Flag that signifies whether to use 'must' or 'should' clause.
preserve_bool_semantics_if_one_clause (bool): Flag that signifies whether to generate a bool query even if
there's only one clause. This is done to preserve boolean query semantics. Usually not needed, but
useful for boolean query support.
Returns:
(dict): If len(queries) > 1, the bool clause; if len(queries) == 1, the query itself;
if len(queries) == 0, an empty dictionary.
"""
if not queries:
return {}
if len(queries) == 1 and not preserve_bool_semantics_if_one_clause:
return queries[0]
return {
'bool': {
('must' if use_must_clause else 'should'): queries
}
} | python | def wrap_queries_in_bool_clauses_if_more_than_one(queries,
use_must_clause,
preserve_bool_semantics_if_one_clause=False):
"""Helper for wrapping a list of queries into a bool.{must, should} clause.
Args:
queries (list): List of queries to be wrapped in a bool.{must, should} clause.
use_must_clause (bool): Flag that signifies whether to use 'must' or 'should' clause.
preserve_bool_semantics_if_one_clause (bool): Flag that signifies whether to generate a bool query even if
there's only one clause. This is done to preserve boolean query semantics. Usually not needed, but
useful for boolean query support.
Returns:
(dict): If len(queries) > 1, the bool clause; if len(queries) == 1, the query itself;
if len(queries) == 0, an empty dictionary.
"""
if not queries:
return {}
if len(queries) == 1 and not preserve_bool_semantics_if_one_clause:
return queries[0]
return {
'bool': {
('must' if use_must_clause else 'should'): queries
}
} | [
"def",
"wrap_queries_in_bool_clauses_if_more_than_one",
"(",
"queries",
",",
"use_must_clause",
",",
"preserve_bool_semantics_if_one_clause",
"=",
"False",
")",
":",
"if",
"not",
"queries",
":",
"return",
"{",
"}",
"if",
"len",
"(",
"queries",
")",
"==",
"1",
"and",
"not",
"preserve_bool_semantics_if_one_clause",
":",
"return",
"queries",
"[",
"0",
"]",
"return",
"{",
"'bool'",
":",
"{",
"(",
"'must'",
"if",
"use_must_clause",
"else",
"'should'",
")",
":",
"queries",
"}",
"}"
] | Helper for wrapping a list of queries into a bool.{must, should} clause.
Args:
queries (list): List of queries to be wrapped in a bool.{must, should} clause.
use_must_clause (bool): Flag that signifies whether to use 'must' or 'should' clause.
preserve_bool_semantics_if_one_clause (bool): Flag that signifies whether to generate a bool query even if
there's only one clause. This is done to preserve boolean query semantics. Usually not needed, but
useful for boolean query support.
Returns:
(dict): If len(queries) > 1, the bool clause; if len(queries) == 1, the query itself;
if len(queries) == 0, an empty dictionary. | [
"Helper",
"for",
"wrapping",
"a",
"list",
"of",
"queries",
"into",
"a",
"bool",
".",
"{",
"must",
"should",
"}",
"clause",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/utils/visitor_utils.py#L422-L448 |
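
Illustrative calls, derived directly from the function above:

queries = [{'match': {'title': 'foo'}}, {'match': {'title': 'bar'}}]

wrap_queries_in_bool_clauses_if_more_than_one(queries, use_must_clause=True)
# -> {'bool': {'must': [{'match': {'title': 'foo'}}, {'match': {'title': 'bar'}}]}}

wrap_queries_in_bool_clauses_if_more_than_one(queries[:1], use_must_clause=True)
# -> {'match': {'title': 'foo'}}  # a single clause is returned unwrapped

wrap_queries_in_bool_clauses_if_more_than_one(queries[:1], use_must_clause=False,
                                              preserve_bool_semantics_if_one_clause=True)
# -> {'bool': {'should': [{'match': {'title': 'foo'}}]}}
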
inspirehep/inspire-query-parser | inspire_query_parser/utils/visitor_utils.py | wrap_query_in_nested_if_field_is_nested | def wrap_query_in_nested_if_field_is_nested(query, field, nested_fields):
"""Helper for wrapping a query into a nested if the fields within the query are nested
Args:
query : The query to be wrapped.
field : The field that is being queried.
nested_fields : List of fields which are nested.
Returns:
(dict): The nested query
"""
for element in nested_fields:
match_pattern = r'^{}.'.format(element)
if re.match(match_pattern, field):
return generate_nested_query(element, query)
return query | python | def wrap_query_in_nested_if_field_is_nested(query, field, nested_fields):
"""Helper for wrapping a query into a nested if the fields within the query are nested
Args:
query : The query to be wrapped.
field : The field that is being queried.
nested_fields : List of fields which are nested.
Returns:
(dict): The nested query
"""
for element in nested_fields:
match_pattern = r'^{}.'.format(element)
if re.match(match_pattern, field):
return generate_nested_query(element, query)
return query | [
"def",
"wrap_query_in_nested_if_field_is_nested",
"(",
"query",
",",
"field",
",",
"nested_fields",
")",
":",
"for",
"element",
"in",
"nested_fields",
":",
"match_pattern",
"=",
"r'^{}.'",
".",
"format",
"(",
"element",
")",
"if",
"re",
".",
"match",
"(",
"match_pattern",
",",
"field",
")",
":",
"return",
"generate_nested_query",
"(",
"element",
",",
"query",
")",
"return",
"query"
] | Helper for wrapping a query into a nested query if the fields within the query are nested
Args:
query : The query to be wrapped.
field : The field that is being queried.
nested_fields : List of fields which are nested.
Returns:
(dict): The nested query | [
"Helper",
"for",
"wrapping",
"a",
"query",
"into",
"a",
"nested",
"if",
"the",
"fields",
"within",
"the",
"query",
"are",
"nested"
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/utils/visitor_utils.py#L451-L466 |
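
A sketch of the wrapping behaviour. generate_nested_query is defined elsewhere in the module, so the nested-clause shape in the comment is an assumption based on the standard Elasticsearch nested query. Note that the dot in r'^{}.' is an unescaped regex wildcard; field names are dot-separated in practice, so the prefix check still behaves as intended:

query = {'match': {'authors.full_name': 'Ellis'}}

wrap_query_in_nested_if_field_is_nested(query, 'authors.full_name', ['authors'])
# -> generate_nested_query('authors', query), presumably of the shape:
# {'nested': {'path': 'authors', 'query': {'match': {'authors.full_name': 'Ellis'}}}}

wrap_query_in_nested_if_field_is_nested(query, 'titles.title', ['authors'])
# -> the query unchanged, since 'titles.title' matches no nested prefix
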
praekeltfoundation/seed-stage-based-messaging | subscriptions/management/commands/remove_duplicate_subscriptions.py | Command.is_within_limits | def is_within_limits(self, limit, date, dates):
"""
Returns True if the difference between date and any value in dates
is less than or equal to limit.
"""
return any((self.second_diff(date, d) <= limit for d in dates)) | python | def is_within_limits(self, limit, date, dates):
"""
Returns True if the difference between date and any value in dates
is less than or equal to limit.
"""
return any((self.second_diff(date, d) <= limit for d in dates)) | [
"def",
"is_within_limits",
"(",
"self",
",",
"limit",
",",
"date",
",",
"dates",
")",
":",
"return",
"any",
"(",
"(",
"self",
".",
"second_diff",
"(",
"date",
",",
"d",
")",
"<=",
"limit",
"for",
"d",
"in",
"dates",
")",
")"
] | Returns True if the difference between date and any value in dates
is less than or equal to limit. | [
"Returns",
"True",
"if",
"the",
"difference",
"between",
"date",
"and",
"any",
"value",
"in",
"dates",
"is",
"less",
"than",
"or",
"equal",
"to",
"limit",
"."
] | train | https://github.com/praekeltfoundation/seed-stage-based-messaging/blob/6f0cacf0727ac2ed19877de214d58009c685b8fa/subscriptions/management/commands/remove_duplicate_subscriptions.py#L79-L84 |
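
A self-contained sketch of the same check; second_diff is assumed to return the absolute difference in seconds between two datetimes:

from datetime import datetime, timedelta

def second_diff(a, b):
    # Assumed behaviour of Command.second_diff.
    return abs((a - b).total_seconds())

def is_within_limits(limit, date, dates):
    return any(second_diff(date, d) <= limit for d in dates)

now = datetime.now()
print(is_within_limits(10, now, [now - timedelta(seconds=5)]))  # True
print(is_within_limits(10, now, [now - timedelta(minutes=5)]))  # False
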
inspirehep/inspire-query-parser | inspire_query_parser/utils/format_parse_tree.py | emit_tree_format | def emit_tree_format(tree, verbose=False):
"""Returns a tree representation of a parse tree.
Arguments:
tree: the parse tree whose tree representation is to be generated
verbose (bool): if True prints the parse tree to be formatted
Returns:
str: tree-like representation of the parse tree
"""
if verbose:
print("Converting: " + repr(tree))
ret_str = __recursive_formatter(tree)
return ret_str | python | def emit_tree_format(tree, verbose=False):
"""Returns a tree representation of a parse tree.
Arguments:
tree: the parse tree whose tree representation is to be generated
verbose (bool): if True prints the parse tree to be formatted
Returns:
str: tree-like representation of the parse tree
"""
if verbose:
print("Converting: " + repr(tree))
ret_str = __recursive_formatter(tree)
return ret_str | [
"def",
"emit_tree_format",
"(",
"tree",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"\"Converting: \"",
"+",
"repr",
"(",
"tree",
")",
")",
"ret_str",
"=",
"__recursive_formatter",
"(",
"tree",
")",
"return",
"ret_str"
] | Returns a tree representation of a parse tree.
Arguments:
tree: the parse tree whose tree representation is to be generated
verbose (bool): if True prints the parse tree to be formatted
Returns:
str: tree-like representation of the parse tree | [
"Returns",
"a",
"tree",
"representation",
"of",
"a",
"parse",
"tree",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/utils/format_parse_tree.py#L34-L47 |
praekeltfoundation/seed-stage-based-messaging | seed_stage_based_messaging/utils.py | calculate_retry_delay | def calculate_retry_delay(attempt, max_delay=300):
"""Calculates an exponential backoff for retry attempts with a small
amount of jitter."""
delay = int(random.uniform(2, 4) ** attempt)
if delay > max_delay:
# After reaching the max delay, stop using exponential growth
# and keep the delay nearby the max.
delay = int(random.uniform(max_delay - 20, max_delay + 20))
return delay | python | def calculate_retry_delay(attempt, max_delay=300):
"""Calculates an exponential backoff for retry attempts with a small
amount of jitter."""
delay = int(random.uniform(2, 4) ** attempt)
if delay > max_delay:
# After reaching the max delay, stop using exponential growth
# and keep the delay nearby the max.
delay = int(random.uniform(max_delay - 20, max_delay + 20))
return delay | [
"def",
"calculate_retry_delay",
"(",
"attempt",
",",
"max_delay",
"=",
"300",
")",
":",
"delay",
"=",
"int",
"(",
"random",
".",
"uniform",
"(",
"2",
",",
"4",
")",
"**",
"attempt",
")",
"if",
"delay",
">",
"max_delay",
":",
"# After reaching the max delay, stop using expontential growth",
"# and keep the delay nearby the max.",
"delay",
"=",
"int",
"(",
"random",
".",
"uniform",
"(",
"max_delay",
"-",
"20",
",",
"max_delay",
"+",
"20",
")",
")",
"return",
"delay"
] | Calculates an exponential backoff for retry attempts with a small
amount of jitter. | [
"Calculates",
"an",
"exponential",
"backoff",
"for",
"retry",
"attempts",
"with",
"a",
"small",
"amount",
"of",
"jitter",
"."
] | train | https://github.com/praekeltfoundation/seed-stage-based-messaging/blob/6f0cacf0727ac2ed19877de214d58009c685b8fa/seed_stage_based_messaging/utils.py#L52-L60 |
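
A quick demonstration of the backoff curve; the function body is replicated so the snippet runs standalone:

import random

def calculate_retry_delay(attempt, max_delay=300):
    delay = int(random.uniform(2, 4) ** attempt)
    if delay > max_delay:
        delay = int(random.uniform(max_delay - 20, max_delay + 20))
    return delay

for attempt in range(1, 8):
    print(attempt, calculate_retry_delay(attempt))
# Typical output grows roughly geometrically (2-4s, then 4-16s, ...) until it
# settles around the jittered ceiling of about 300 seconds.
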
inspirehep/inspire-query-parser | inspire_query_parser/parsing_driver.py | parse_query | def parse_query(query_str):
"""
Drives the whole logic by parsing, restructuring and finally generating an ElasticSearch query.
Args:
query_str (six.text_type): the given query to be translated to an ElasticSearch query
Returns:
six.text_type: The resulting ElasticSearch query.
Notes:
In case there's an error, an ElasticSearch `multi_match` query is generated with its `query` value being the
query_str argument.
"""
def _generate_match_all_fields_query():
# Strip colon character (special character for ES)
stripped_query_str = ' '.join(query_str.replace(':', ' ').split())
return {'multi_match': {'query': stripped_query_str, 'fields': ['_all'], 'zero_terms_query': 'all'}}
if not isinstance(query_str, six.text_type):
query_str = six.text_type(query_str.decode('utf-8'))
logger.info('Parsing: "' + query_str + '\".')
parser = StatefulParser()
rst_visitor = RestructuringVisitor()
es_visitor = ElasticSearchVisitor()
try:
unrecognized_text, parse_tree = parser.parse(query_str, Query)
if unrecognized_text: # Usually, should never happen.
msg = 'Parser returned unrecognized text: "' + unrecognized_text + \
'" for query: "' + query_str + '".'
if query_str == unrecognized_text and parse_tree is None:
# Didn't recognize anything.
logger.warn(msg)
return _generate_match_all_fields_query()
else:
msg += ' Continuing with recognized parse tree.'
logger.warn(msg)
except SyntaxError as e:
logger.warn('Parser syntax error (' + six.text_type(e) + ') with query: "' + query_str +
'". Continuing with a match_all with the given query.')
return _generate_match_all_fields_query()
# Try-Catch-all exceptions for visitors, so that search functionality never fails for the user.
try:
restructured_parse_tree = parse_tree.accept(rst_visitor)
logger.debug('Parse tree: \n' + emit_tree_format(restructured_parse_tree))
except Exception as e:
logger.exception(
RestructuringVisitor.__name__ + " crashed" + (": " + six.text_type(e) + ".") if six.text_type(e) else '.'
)
return _generate_match_all_fields_query()
try:
es_query = restructured_parse_tree.accept(es_visitor)
except Exception as e:
logger.exception(
ElasticSearchVisitor.__name__ + " crashed" + (": " + six.text_type(e) + ".") if six.text_type(e) else '.'
)
return _generate_match_all_fields_query()
if not es_query:
# Case where an empty query was generated (i.e. date query with malformed date, e.g. "d < 200").
return _generate_match_all_fields_query()
return es_query | python | def parse_query(query_str):
"""
Drives the whole logic by parsing, restructuring and finally generating an ElasticSearch query.
Args:
query_str (six.text_type): the given query to be translated to an ElasticSearch query
Returns:
six.text_type: The resulting ElasticSearch query.
Notes:
In case there's an error, an ElasticSearch `multi_match` query is generated with its `query` value being the
query_str argument.
"""
def _generate_match_all_fields_query():
# Strip colon character (special character for ES)
stripped_query_str = ' '.join(query_str.replace(':', ' ').split())
return {'multi_match': {'query': stripped_query_str, 'fields': ['_all'], 'zero_terms_query': 'all'}}
if not isinstance(query_str, six.text_type):
query_str = six.text_type(query_str.decode('utf-8'))
logger.info('Parsing: "' + query_str + '\".')
parser = StatefulParser()
rst_visitor = RestructuringVisitor()
es_visitor = ElasticSearchVisitor()
try:
unrecognized_text, parse_tree = parser.parse(query_str, Query)
if unrecognized_text: # Usually, should never happen.
msg = 'Parser returned unrecognized text: "' + unrecognized_text + \
'" for query: "' + query_str + '".'
if query_str == unrecognized_text and parse_tree is None:
# Didn't recognize anything.
logger.warn(msg)
return _generate_match_all_fields_query()
else:
msg += ' Continuing with recognized parse tree.'
logger.warn(msg)
except SyntaxError as e:
logger.warn('Parser syntax error (' + six.text_type(e) + ') with query: "' + query_str +
'". Continuing with a match_all with the given query.')
return _generate_match_all_fields_query()
# Try-Catch-all exceptions for visitors, so that search functionality never fails for the user.
try:
restructured_parse_tree = parse_tree.accept(rst_visitor)
logger.debug('Parse tree: \n' + emit_tree_format(restructured_parse_tree))
except Exception as e:
logger.exception(
RestructuringVisitor.__name__ + " crashed" + (": " + six.text_type(e) + ".") if six.text_type(e) else '.'
)
return _generate_match_all_fields_query()
try:
es_query = restructured_parse_tree.accept(es_visitor)
except Exception as e:
logger.exception(
ElasticSearchVisitor.__name__ + " crashed" + (": " + six.text_type(e) + ".") if six.text_type(e) else '.'
)
return _generate_match_all_fields_query()
if not es_query:
# Case where an empty query was generated (i.e. date query with malformed date, e.g. "d < 200").
return _generate_match_all_fields_query()
return es_query | [
"def",
"parse_query",
"(",
"query_str",
")",
":",
"def",
"_generate_match_all_fields_query",
"(",
")",
":",
"# Strip colon character (special character for ES)",
"stripped_query_str",
"=",
"' '",
".",
"join",
"(",
"query_str",
".",
"replace",
"(",
"':'",
",",
"' '",
")",
".",
"split",
"(",
")",
")",
"return",
"{",
"'multi_match'",
":",
"{",
"'query'",
":",
"stripped_query_str",
",",
"'fields'",
":",
"[",
"'_all'",
"]",
",",
"'zero_terms_query'",
":",
"'all'",
"}",
"}",
"if",
"not",
"isinstance",
"(",
"query_str",
",",
"six",
".",
"text_type",
")",
":",
"query_str",
"=",
"six",
".",
"text_type",
"(",
"query_str",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"logger",
".",
"info",
"(",
"'Parsing: \"'",
"+",
"query_str",
"+",
"'\\\".'",
")",
"parser",
"=",
"StatefulParser",
"(",
")",
"rst_visitor",
"=",
"RestructuringVisitor",
"(",
")",
"es_visitor",
"=",
"ElasticSearchVisitor",
"(",
")",
"try",
":",
"unrecognized_text",
",",
"parse_tree",
"=",
"parser",
".",
"parse",
"(",
"query_str",
",",
"Query",
")",
"if",
"unrecognized_text",
":",
"# Usually, should never happen.",
"msg",
"=",
"'Parser returned unrecognized text: \"'",
"+",
"unrecognized_text",
"+",
"'\" for query: \"'",
"+",
"query_str",
"+",
"'\".'",
"if",
"query_str",
"==",
"unrecognized_text",
"and",
"parse_tree",
"is",
"None",
":",
"# Didn't recognize anything.",
"logger",
".",
"warn",
"(",
"msg",
")",
"return",
"_generate_match_all_fields_query",
"(",
")",
"else",
":",
"msg",
"+=",
"'Continuing with recognized parse tree.'",
"logger",
".",
"warn",
"(",
"msg",
")",
"except",
"SyntaxError",
"as",
"e",
":",
"logger",
".",
"warn",
"(",
"'Parser syntax error ('",
"+",
"six",
".",
"text_type",
"(",
"e",
")",
"+",
"') with query: \"'",
"+",
"query_str",
"+",
"'\". Continuing with a match_all with the given query.'",
")",
"return",
"_generate_match_all_fields_query",
"(",
")",
"# Try-Catch-all exceptions for visitors, so that search functionality never fails for the user.",
"try",
":",
"restructured_parse_tree",
"=",
"parse_tree",
".",
"accept",
"(",
"rst_visitor",
")",
"logger",
".",
"debug",
"(",
"'Parse tree: \\n'",
"+",
"emit_tree_format",
"(",
"restructured_parse_tree",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"RestructuringVisitor",
".",
"__name__",
"+",
"\" crashed\"",
"+",
"(",
"\": \"",
"+",
"six",
".",
"text_type",
"(",
"e",
")",
"+",
"\".\"",
")",
"if",
"six",
".",
"text_type",
"(",
"e",
")",
"else",
"'.'",
")",
"return",
"_generate_match_all_fields_query",
"(",
")",
"try",
":",
"es_query",
"=",
"restructured_parse_tree",
".",
"accept",
"(",
"es_visitor",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"ElasticSearchVisitor",
".",
"__name__",
"+",
"\" crashed\"",
"+",
"(",
"\": \"",
"+",
"six",
".",
"text_type",
"(",
"e",
")",
"+",
"\".\"",
")",
"if",
"six",
".",
"text_type",
"(",
"e",
")",
"else",
"'.'",
")",
"return",
"_generate_match_all_fields_query",
"(",
")",
"if",
"not",
"es_query",
":",
"# Case where an empty query was generated (i.e. date query with malformed date, e.g. \"d < 200\").",
"return",
"_generate_match_all_fields_query",
"(",
")",
"return",
"es_query"
] | Drives the whole logic by parsing, restructuring and finally generating an ElasticSearch query.
Args:
query_str (six.text_type): the given query to be translated to an ElasticSearch query
Returns:
six.text_type: The resulting ElasticSearch query.
Notes:
In case there's an error, an ElasticSearch `multi_match` query is generated with its `query` value being the
query_str argument. | [
"Drives",
"the",
"whole",
"logic",
"by",
"parsing",
"restructuring",
"and",
"finally",
"generating",
"an",
"ElasticSearch",
"query",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/parsing_driver.py#L42-L113 |
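
A hedged usage sketch; the module path comes from the record above, while the example query string is illustrative:

from inspire_query_parser.parsing_driver import parse_query

es_query = parse_query(u'a Ellis and t boson')
# On any parser or visitor failure the function falls back to a multi_match
# query over '_all', so callers always receive a usable ES query dict.
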
jreese/tasky | tasky/loop.py | Tasky.task | def task(self, name_or_class: Any) -> Task:
'''Return a running Task object matching the given name or class.'''
if name_or_class in self.all_tasks:
return self.all_tasks[name_or_class]
try:
return self.all_tasks.get(name_or_class.__class__.__name__, None)
except AttributeError:
return None | python | def task(self, name_or_class: Any) -> Task:
'''Return a running Task object matching the given name or class.'''
if name_or_class in self.all_tasks:
return self.all_tasks[name_or_class]
try:
return self.all_tasks.get(name_or_class.__class__.__name__, None)
except AttributeError:
return None | [
"def",
"task",
"(",
"self",
",",
"name_or_class",
":",
"Any",
")",
"->",
"Task",
":",
"if",
"name_or_class",
"in",
"self",
".",
"all_tasks",
":",
"return",
"self",
".",
"all_tasks",
"[",
"name_or_class",
"]",
"try",
":",
"return",
"self",
".",
"all_tasks",
".",
"get",
"(",
"name_or_class",
".",
"__class__",
".",
"__name__",
",",
"None",
")",
"except",
"AttributeError",
":",
"return",
"None"
] | Return a running Task object matching the given name or class. | [
"Return",
"a",
"running",
"Task",
"object",
"matching",
"the",
"given",
"name",
"or",
"class",
"."
] | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L79-L89 |
jreese/tasky | tasky/loop.py | Tasky.init | async def init(self) -> None:
'''Initialize configuration and start tasks.'''
self.stats = await self.insert(self.stats)
self.configuration = await self.insert(self.configuration)
if not self.executor:
try:
max_workers = self.config.get('executor_workers')
except Exception:
max_workers = None
self.executor = ThreadPoolExecutor(max_workers=max_workers)
for task in self.initial_tasks:
await self.insert(task)
self.monitor = asyncio.ensure_future(self.monitor_tasks())
self.counters['alive_since'] = time.time() | python | async def init(self) -> None:
'''Initialize configuration and start tasks.'''
self.stats = await self.insert(self.stats)
self.configuration = await self.insert(self.configuration)
if not self.executor:
try:
max_workers = self.config.get('executor_workers')
except Exception:
max_workers = None
self.executor = ThreadPoolExecutor(max_workers=max_workers)
for task in self.initial_tasks:
await self.insert(task)
self.monitor = asyncio.ensure_future(self.monitor_tasks())
self.counters['alive_since'] = time.time() | [
"async",
"def",
"init",
"(",
"self",
")",
"->",
"None",
":",
"self",
".",
"stats",
"=",
"await",
"self",
".",
"insert",
"(",
"self",
".",
"stats",
")",
"self",
".",
"configuration",
"=",
"await",
"self",
".",
"insert",
"(",
"self",
".",
"configuration",
")",
"if",
"not",
"self",
".",
"executor",
":",
"try",
":",
"max_workers",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'executor_workers'",
")",
"except",
"Exception",
":",
"max_workers",
"=",
"None",
"self",
".",
"executor",
"=",
"ThreadPoolExecutor",
"(",
"max_workers",
"=",
"max_workers",
")",
"for",
"task",
"in",
"self",
".",
"initial_tasks",
":",
"await",
"self",
".",
"insert",
"(",
"task",
")",
"self",
".",
"monitor",
"=",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"monitor_tasks",
"(",
")",
")",
"self",
".",
"counters",
"[",
"'alive_since'",
"]",
"=",
"time",
".",
"time",
"(",
")"
] | Initialize configuration and start tasks. | [
"Initialize",
"configuration",
"and",
"start",
"tasks",
"."
] | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L91-L109 |
jreese/tasky | tasky/loop.py | Tasky.insert | async def insert(self, task: Task) -> Task:
'''Insert the given task class into the Tasky event loop.'''
if not isinstance(task, Task):
task = task()
if task.name not in self.all_tasks:
task.tasky = self
self.all_tasks[task.name] = task
await task.init()
elif task != self.all_tasks[task.name]:
raise Exception('Duplicate task %s' % task.name)
if task.enabled:
task.task = asyncio.ensure_future(self.start_task(task))
self.running_tasks.add(task)
else:
task.task = None
return task | python | async def insert(self, task: Task) -> None:
'''Insert the given task class into the Tasky event loop.'''
if not isinstance(task, Task):
task = task()
if task.name not in self.all_tasks:
task.tasky = self
self.all_tasks[task.name] = task
await task.init()
elif task != self.all_tasks[task.name]:
raise Exception('Duplicate task %s' % task.name)
if task.enabled:
task.task = asyncio.ensure_future(self.start_task(task))
self.running_tasks.add(task)
else:
task.task = None
return task | [
"async",
"def",
"insert",
"(",
"self",
",",
"task",
":",
"Task",
")",
"->",
"None",
":",
"if",
"not",
"isinstance",
"(",
"task",
",",
"Task",
")",
":",
"task",
"=",
"task",
"(",
")",
"if",
"task",
".",
"name",
"not",
"in",
"self",
".",
"all_tasks",
":",
"task",
".",
"tasky",
"=",
"self",
"self",
".",
"all_tasks",
"[",
"task",
".",
"name",
"]",
"=",
"task",
"await",
"task",
".",
"init",
"(",
")",
"elif",
"task",
"!=",
"self",
".",
"all_tasks",
"[",
"task",
".",
"name",
"]",
":",
"raise",
"Exception",
"(",
"'Duplicate task %s'",
"%",
"task",
".",
"name",
")",
"if",
"task",
".",
"enabled",
":",
"task",
".",
"task",
"=",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"start_task",
"(",
"task",
")",
")",
"self",
".",
"running_tasks",
".",
"add",
"(",
"task",
")",
"else",
":",
"task",
".",
"task",
"=",
"None",
"return",
"task"
] | Insert the given task class into the Tasky event loop. | [
"Insert",
"the",
"given",
"task",
"class",
"into",
"the",
"Tasky",
"event",
"loop",
"."
] | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L111-L132 |
jreese/tasky | tasky/loop.py | Tasky.execute | async def execute(self, fn, *args, **kwargs) -> Any:
'''Execute an arbitrary function outside the event loop using
a shared Executor.'''
fn = functools.partial(fn, *args, **kwargs)
return await self.loop.run_in_executor(self.executor, fn) | python | async def execute(self, fn, *args, **kwargs) -> None:
'''Execute an arbitrary function outside the event loop using
a shared Executor.'''
fn = functools.partial(fn, *args, **kwargs)
return await self.loop.run_in_executor(self.executor, fn) | [
"async",
"def",
"execute",
"(",
"self",
",",
"fn",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"->",
"None",
":",
"fn",
"=",
"functools",
".",
"partial",
"(",
"fn",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"await",
"self",
".",
"loop",
".",
"run_in_executor",
"(",
"self",
".",
"executor",
",",
"fn",
")"
] | Execute an arbitrary function outside the event loop using
a shared Executor. | [
"Execute",
"an",
"arbitrary",
"function",
"outside",
"the",
"event",
"loop",
"using",
"a",
"shared",
"Executor",
"."
] | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L134-L139 |
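
A sketch of calling execute from inside a task. insert() above sets task.tasky, so that attribute is grounded in the source; the run() entry point, the tasky import and the requests dependency are assumptions for illustration:

import requests  # hypothetical blocking dependency
from tasky import Task  # assumed public export

class FetchTask(Task):
    async def run(self):  # assumed Task entry point
        # Offload the blocking HTTP call so the event loop stays responsive.
        response = await self.tasky.execute(requests.get, 'https://example.com')
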
jreese/tasky | tasky/loop.py | Tasky.run_forever | def run_forever(self) -> None:
'''Execute the tasky/asyncio event loop until terminated.'''
Log.debug('running event loop until terminated')
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close() | python | def run_forever(self) -> None:
'''Execute the tasky/asyncio event loop until terminated.'''
Log.debug('running event loop until terminated')
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close() | [
"def",
"run_forever",
"(",
"self",
")",
"->",
"None",
":",
"Log",
".",
"debug",
"(",
"'running event loop until terminated'",
")",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"init",
"(",
")",
")",
"self",
".",
"loop",
".",
"run_forever",
"(",
")",
"self",
".",
"loop",
".",
"close",
"(",
")"
] | Execute the tasky/asyncio event loop until terminated. | [
"Execute",
"the",
"tasky",
"/",
"asyncio",
"event",
"loop",
"until",
"terminated",
"."
] | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L141-L147 |
jreese/tasky | tasky/loop.py | Tasky.run_until_complete | def run_until_complete(self) -> None:
'''Execute the tasky/asyncio event loop until all tasks finish.'''
Log.debug('running event loop until all tasks completed')
self.terminate_on_finish = True
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close() | python | def run_until_complete(self) -> None:
'''Execute the tasky/asyncio event loop until all tasks finish.'''
Log.debug('running event loop until all tasks completed')
self.terminate_on_finish = True
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close() | [
"def",
"run_until_complete",
"(",
"self",
")",
"->",
"None",
":",
"Log",
".",
"debug",
"(",
"'running event loop until all tasks completed'",
")",
"self",
".",
"terminate_on_finish",
"=",
"True",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"init",
"(",
")",
")",
"self",
".",
"loop",
".",
"run_forever",
"(",
")",
"self",
".",
"loop",
".",
"close",
"(",
")"
] | Execute the tasky/asyncio event loop until all tasks finish. | [
"Execute",
"the",
"tasky",
"/",
"asyncio",
"event",
"loop",
"until",
"all",
"tasks",
"finish",
"."
] | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L149-L156 |
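
A minimal lifecycle sketch. The constructor argument is inferred from self.initial_tasks in init() above, and the run() entry point is assumed, so treat both as unverified:

import asyncio
from tasky import Task, Tasky  # assumed public exports

class HelloTask(Task):
    async def run(self):
        print('hello')
        await asyncio.sleep(1)

Tasky([HelloTask]).run_until_complete()
# init() schedules HelloTask; once no tasks remain running, the monitor sees
# terminate_on_finish and shuts the loop down.
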
jreese/tasky | tasky/loop.py | Tasky.run_for_time | def run_for_time(self, duration: float=10.0) -> None:
'''Execute the tasky/asyncio event loop for `duration` seconds.'''
Log.debug('running event loop for %.1f seconds', duration)
try:
asyncio.ensure_future(self.init())
self.loop.run_until_complete(asyncio.sleep(duration))
self.terminate()
self.loop.run_forever()
except RuntimeError as e:
if not e.args[0].startswith('Event loop stopped'):
raise
finally:
self.loop.close() | python | def run_for_time(self, duration: float=10.0) -> None:
'''Execute the tasky/asyncio event loop for `duration` seconds.'''
Log.debug('running event loop for %.1f seconds', duration)
try:
asyncio.ensure_future(self.init())
self.loop.run_until_complete(asyncio.sleep(duration))
self.terminate()
self.loop.run_forever()
except RuntimeError as e:
if not e.args[0].startswith('Event loop stopped'):
raise
finally:
self.loop.close() | [
"def",
"run_for_time",
"(",
"self",
",",
"duration",
":",
"float",
"=",
"10.0",
")",
"->",
"None",
":",
"Log",
".",
"debug",
"(",
"'running event loop for %.1f seconds'",
",",
"duration",
")",
"try",
":",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"init",
"(",
")",
")",
"self",
".",
"loop",
".",
"run_until_complete",
"(",
"asyncio",
".",
"sleep",
"(",
"duration",
")",
")",
"self",
".",
"terminate",
"(",
")",
"self",
".",
"loop",
".",
"run_forever",
"(",
")",
"except",
"RuntimeError",
"as",
"e",
":",
"if",
"not",
"e",
".",
"args",
"[",
"0",
"]",
".",
"startswith",
"(",
"'Event loop stopped'",
")",
":",
"raise",
"finally",
":",
"self",
".",
"loop",
".",
"close",
"(",
")"
] | Execute the tasky/asyncio event loop for `duration` seconds. | [
"Execute",
"the",
"tasky",
"/",
"asyncio",
"event",
"loop",
"for",
"duration",
"seconds",
"."
] | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L158-L173 |
jreese/tasky | tasky/loop.py | Tasky.terminate | def terminate(self, *, force: bool=False, timeout: float=30.0,
step: float=1.0) -> None:
'''Stop all scheduled and/or executing tasks, first by asking nicely,
and then by waiting up to `timeout` seconds before forcefully stopping
the asyncio event loop.'''
if isinstance(self.monitor, asyncio.Future):
Log.debug('cancelling task monitor')
self.monitor.cancel()
Log.debug('stopping tasks')
for task in list(self.running_tasks):
if task.task.done():
Log.debug('task %s already stopped', task.name)
self.running_tasks.discard(task)
else:
Log.debug('asking %s to stop', task.name)
asyncio.ensure_future(task.stop(force=force))
if timeout > 0 and (self.monitor or self.running_tasks):
Log.debug('waiting %.1f seconds for remaining tasks (%d)...',
timeout, len(self.running_tasks))
timeout -= step
fn = functools.partial(self.terminate, force=force,
timeout=timeout, step=step)
return self.loop.call_later(step, fn)
if timeout > 0:
Log.debug('all tasks completed, stopping event loop')
else:
Log.debug('timed out waiting for tasks, stopping event loop')
self.loop.stop() | python | def terminate(self, *, force: bool=False, timeout: float=30.0,
step: float=1.0) -> None:
'''Stop all scheduled and/or executing tasks, first by asking nicely,
and then by waiting up to `timeout` seconds before forcefully stopping
the asyncio event loop.'''
if isinstance(self.monitor, asyncio.Future):
Log.debug('cancelling task monitor')
self.monitor.cancel()
Log.debug('stopping tasks')
for task in list(self.running_tasks):
if task.task.done():
Log.debug('task %s already stopped', task.name)
self.running_tasks.discard(task)
else:
Log.debug('asking %s to stop', task.name)
asyncio.ensure_future(task.stop(force=force))
if timeout > 0 and (self.monitor or self.running_tasks):
Log.debug('waiting %.1f seconds for remaining tasks (%d)...',
timeout, len(self.running_tasks))
timeout -= step
fn = functools.partial(self.terminate, force=force,
timeout=timeout, step=step)
return self.loop.call_later(step, fn)
if timeout > 0:
Log.debug('all tasks completed, stopping event loop')
else:
Log.debug('timed out waiting for tasks, stopping event loop')
self.loop.stop() | [
"def",
"terminate",
"(",
"self",
",",
"*",
",",
"force",
":",
"bool",
"=",
"False",
",",
"timeout",
":",
"float",
"=",
"30.0",
",",
"step",
":",
"float",
"=",
"1.0",
")",
"->",
"None",
":",
"if",
"isinstance",
"(",
"self",
".",
"monitor",
",",
"asyncio",
".",
"Future",
")",
":",
"Log",
".",
"debug",
"(",
"'cancelling task monitor'",
")",
"self",
".",
"monitor",
".",
"cancel",
"(",
")",
"Log",
".",
"debug",
"(",
"'stopping tasks'",
")",
"for",
"task",
"in",
"list",
"(",
"self",
".",
"running_tasks",
")",
":",
"if",
"task",
".",
"task",
".",
"done",
"(",
")",
":",
"Log",
".",
"debug",
"(",
"'task %s already stopped'",
",",
"task",
".",
"name",
")",
"self",
".",
"running_tasks",
".",
"discard",
"(",
"task",
")",
"else",
":",
"Log",
".",
"debug",
"(",
"'asking %s to stop'",
",",
"task",
".",
"name",
")",
"asyncio",
".",
"ensure_future",
"(",
"task",
".",
"stop",
"(",
"force",
"=",
"force",
")",
")",
"if",
"timeout",
">",
"0",
"and",
"(",
"self",
".",
"monitor",
"or",
"self",
".",
"running_tasks",
")",
":",
"Log",
".",
"debug",
"(",
"'waiting %.1f seconds for remaining tasks (%d)...'",
",",
"timeout",
",",
"len",
"(",
"self",
".",
"running_tasks",
")",
")",
"timeout",
"-=",
"step",
"fn",
"=",
"functools",
".",
"partial",
"(",
"self",
".",
"terminate",
",",
"force",
"=",
"force",
",",
"timeout",
"=",
"timeout",
",",
"step",
"=",
"step",
")",
"return",
"self",
".",
"loop",
".",
"call_later",
"(",
"step",
",",
"fn",
")",
"if",
"timeout",
">",
"0",
":",
"Log",
".",
"debug",
"(",
"'all tasks completed, stopping event loop'",
")",
"else",
":",
"Log",
".",
"debug",
"(",
"'timed out waiting for tasks, stopping event loop'",
")",
"self",
".",
"loop",
".",
"stop",
"(",
")"
] | Stop all scheduled and/or executing tasks, first by asking nicely,
and then by waiting up to `timeout` seconds before forcefully stopping
the asyncio event loop. | [
"Stop",
"all",
"scheduled",
"and",
"/",
"or",
"executing",
"tasks",
"first",
"by",
"asking",
"nicely",
"and",
"then",
"by",
"waiting",
"up",
"to",
"timeout",
"seconds",
"before",
"forcefully",
"stopping",
"the",
"asyncio",
"event",
"loop",
"."
] | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L175-L208 |
jreese/tasky | tasky/loop.py | Tasky.start_task | async def start_task(self, task: Task) -> None:
'''Initialize the task, queue it for execution, add the done callback,
and keep track of it for when tasks need to be stopped.'''
try:
Log.debug('task %s starting', task.name)
before = time.time()
task.counters['last_run'] = before
task.running = True
self.running_tasks.add(task)
await task.run_task()
Log.debug('task %s completed', task.name)
except CancelledError:
Log.debug('task %s cancelled', task.name)
except Exception:
Log.exception('unhandled exception in task %s', task.name)
finally:
self.running_tasks.discard(task)
task.running = False
task.task = None
after = time.time()
total = after - before
task.counters['last_completed'] = after
task.counters['duration'] = total | python | async def start_task(self, task: Task) -> None:
'''Initialize the task, queue it for execution, add the done callback,
and keep track of it for when tasks need to be stopped.'''
try:
Log.debug('task %s starting', task.name)
before = time.time()
task.counters['last_run'] = before
task.running = True
self.running_tasks.add(task)
await task.run_task()
Log.debug('task %s completed', task.name)
except CancelledError:
Log.debug('task %s cancelled', task.name)
except Exception:
Log.exception('unhandled exception in task %s', task.name)
finally:
self.running_tasks.discard(task)
task.running = False
task.task = None
after = time.time()
total = after - before
task.counters['last_completed'] = after
task.counters['duration'] = total | [
"async",
"def",
"start_task",
"(",
"self",
",",
"task",
":",
"Task",
")",
"->",
"None",
":",
"try",
":",
"Log",
".",
"debug",
"(",
"'task %s starting'",
",",
"task",
".",
"name",
")",
"before",
"=",
"time",
".",
"time",
"(",
")",
"task",
".",
"counters",
"[",
"'last_run'",
"]",
"=",
"before",
"task",
".",
"running",
"=",
"True",
"self",
".",
"running_tasks",
".",
"add",
"(",
"task",
")",
"await",
"task",
".",
"run_task",
"(",
")",
"Log",
".",
"debug",
"(",
"'task %s completed'",
",",
"task",
".",
"name",
")",
"except",
"CancelledError",
":",
"Log",
".",
"debug",
"(",
"'task %s cancelled'",
",",
"task",
".",
"name",
")",
"except",
"Exception",
":",
"Log",
".",
"exception",
"(",
"'unhandled exception in task %s'",
",",
"task",
".",
"name",
")",
"finally",
":",
"self",
".",
"running_tasks",
".",
"discard",
"(",
"task",
")",
"task",
".",
"running",
"=",
"False",
"task",
".",
"task",
"=",
"None",
"after",
"=",
"time",
".",
"time",
"(",
")",
"total",
"=",
"after",
"-",
"before",
"task",
".",
"counters",
"[",
"'last_completed'",
"]",
"=",
"after",
"task",
".",
"counters",
"[",
"'duration'",
"]",
"=",
"total"
] | Initialize the task, queue it for execution, add the done callback,
and keep track of it for when tasks need to be stopped. | [
"Initialize",
"the",
"task",
"queue",
"it",
"for",
"execution",
"add",
"the",
"done",
"callback",
"and",
"keep",
"track",
"of",
"it",
"for",
"when",
"tasks",
"need",
"to",
"be",
"stopped",
"."
] | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L210-L238 |
jreese/tasky | tasky/loop.py | Tasky.monitor_tasks | async def monitor_tasks(self, interval: float=1.0) -> None:
'''Monitor all known tasks for run state. Ensure that enabled tasks
are running, and that disabled tasks are stopped.'''
Log.debug('monitor running')
while True:
try:
await asyncio.sleep(interval)
for name, task in self.all_tasks.items():
if self.terminate_on_finish:
if task in self.running_tasks and task.running:
await task.stop()
elif task.enabled:
if task not in self.running_tasks:
Log.debug('task %s enabled, restarting', task.name)
await self.insert(task)
else:
if task in self.running_tasks:
Log.debug('task %s disabled, stopping', task.name)
await task.stop()
if self.terminate_on_finish and not self.running_tasks:
Log.debug('all tasks completed, terminating')
break
except CancelledError:
Log.debug('monitor cancelled')
break
except Exception:
Log.exception('monitoring exception')
self.monitor = None
self.loop.call_later(0, self.terminate) | python | async def monitor_tasks(self, interval: float=1.0) -> None:
'''Monitor all known tasks for run state. Ensure that enabled tasks
are running, and that disabled tasks are stopped.'''
Log.debug('monitor running')
while True:
try:
await asyncio.sleep(interval)
for name, task in self.all_tasks.items():
if self.terminate_on_finish:
if task in self.running_tasks and task.running:
await task.stop()
elif task.enabled:
if task not in self.running_tasks:
Log.debug('task %s enabled, restarting', task.name)
await self.insert(task)
else:
if task in self.running_tasks:
Log.debug('task %s disabled, stopping', task.name)
await task.stop()
if self.terminate_on_finish and not self.running_tasks:
Log.debug('all tasks completed, terminating')
break
except CancelledError:
Log.debug('monitor cancelled')
break
except Exception:
Log.exception('monitoring exception')
self.monitor = None
self.loop.call_later(0, self.terminate) | [
"async",
"def",
"monitor_tasks",
"(",
"self",
",",
"interval",
":",
"float",
"=",
"1.0",
")",
"->",
"None",
":",
"Log",
".",
"debug",
"(",
"'monitor running'",
")",
"while",
"True",
":",
"try",
":",
"await",
"asyncio",
".",
"sleep",
"(",
"interval",
")",
"for",
"name",
",",
"task",
"in",
"self",
".",
"all_tasks",
".",
"items",
"(",
")",
":",
"if",
"self",
".",
"terminate_on_finish",
":",
"if",
"task",
"in",
"self",
".",
"running_tasks",
"and",
"task",
".",
"running",
":",
"await",
"task",
".",
"stop",
"(",
")",
"elif",
"task",
".",
"enabled",
":",
"if",
"task",
"not",
"in",
"self",
".",
"running_tasks",
":",
"Log",
".",
"debug",
"(",
"'task %s enabled, restarting'",
",",
"task",
".",
"name",
")",
"await",
"self",
".",
"insert",
"(",
"task",
")",
"else",
":",
"if",
"task",
"in",
"self",
".",
"running_tasks",
":",
"Log",
".",
"debug",
"(",
"'task %s disabled, stopping'",
",",
"task",
".",
"name",
")",
"await",
"task",
".",
"stop",
"(",
")",
"if",
"self",
".",
"terminate_on_finish",
"and",
"not",
"self",
".",
"running_tasks",
":",
"Log",
".",
"debug",
"(",
"'all tasks completed, terminating'",
")",
"break",
"except",
"CancelledError",
":",
"Log",
".",
"debug",
"(",
"'monitor cancelled'",
")",
"break",
"except",
"Exception",
":",
"Log",
".",
"exception",
"(",
"'monitoring exception'",
")",
"self",
".",
"monitor",
"=",
"None",
"self",
".",
"loop",
".",
"call_later",
"(",
"0",
",",
"self",
".",
"terminate",
")"
] | Monitor all known tasks for run state. Ensure that enabled tasks
are running, and that disabled tasks are stopped. | [
"Monitor",
"all",
"known",
"tasks",
"for",
"run",
"state",
".",
"Ensure",
"that",
"enabled",
"tasks",
"are",
"running",
"and",
"that",
"disabled",
"tasks",
"are",
"stopped",
"."
] | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L240-L276 |
jreese/tasky | tasky/loop.py | Tasky.exception | def exception(self, loop: asyncio.BaseEventLoop, context: dict) -> None:
'''Log unhandled exceptions from anywhere in the event loop.'''
Log.error('unhandled exception: %s', context['message'])
Log.error('%s', context)
if 'exception' in context:
Log.error(' %s', context['exception']) | python | def exception(self, loop: asyncio.BaseEventLoop, context: dict) -> None:
'''Log unhandled exceptions from anywhere in the event loop.'''
Log.error('unhandled exception: %s', context['message'])
Log.error('%s', context)
if 'exception' in context:
Log.error(' %s', context['exception']) | [
"def",
"exception",
"(",
"self",
",",
"loop",
":",
"asyncio",
".",
"BaseEventLoop",
",",
"context",
":",
"dict",
")",
"->",
"None",
":",
"Log",
".",
"error",
"(",
"'unhandled exception: %s'",
",",
"context",
"[",
"'message'",
"]",
")",
"Log",
".",
"error",
"(",
"'%s'",
",",
"context",
")",
"if",
"'exception'",
"in",
"context",
":",
"Log",
".",
"error",
"(",
"' %s'",
",",
"context",
"[",
"'exception'",
"]",
")"
] | Log unhandled exceptions from anywhere in the event loop. | [
"Log",
"unhandled",
"exceptions",
"from",
"anywhere",
"in",
"the",
"event",
"loop",
"."
] | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L278-L284 |
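
The method matches asyncio's exception-handler signature, so a bound method can be registered directly. A sketch, since where the library actually wires this up is not shown here; tasky names an existing Tasky instance:

loop = asyncio.get_event_loop()
loop.set_exception_handler(tasky.exception)
# asyncio now calls tasky.exception(loop, context) for any exception that
# escapes a task or callback without being retrieved.
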
jreese/tasky | tasky/loop.py | Tasky.sigint | def sigint(self) -> None:
'''Handle the user pressing Ctrl-C by stopping tasks nicely at first,
then forcibly upon further presses.'''
if self.stop_attempts < 1:
Log.info('gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
elif self.stop_attempts < 2:
Log.info('forcefully cancelling tasks')
self.stop_attempts += 1
self.terminate(force=True)
else:
Log.info('forcefully stopping event loop')
self.loop.stop() | python | def sigint(self) -> None:
'''Handle the user pressing Ctrl-C by stopping tasks nicely at first,
then forcibly upon further presses.'''
if self.stop_attempts < 1:
Log.info('gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
elif self.stop_attempts < 2:
Log.info('forcefully cancelling tasks')
self.stop_attempts += 1
self.terminate(force=True)
else:
Log.info('forcefully stopping event loop')
self.loop.stop() | [
"def",
"sigint",
"(",
"self",
")",
"->",
"None",
":",
"if",
"self",
".",
"stop_attempts",
"<",
"1",
":",
"Log",
".",
"info",
"(",
"'gracefully stopping tasks'",
")",
"self",
".",
"stop_attempts",
"+=",
"1",
"self",
".",
"terminate",
"(",
")",
"elif",
"self",
".",
"stop_attempts",
"<",
"2",
":",
"Log",
".",
"info",
"(",
"'forcefully cancelling tasks'",
")",
"self",
".",
"stop_attempts",
"+=",
"1",
"self",
".",
"terminate",
"(",
"force",
"=",
"True",
")",
"else",
":",
"Log",
".",
"info",
"(",
"'forcefully stopping event loop'",
")",
"self",
".",
"loop",
".",
"stop",
"(",
")"
] | Handle the user pressing Ctrl-C by stopping tasks nicely at first,
then forcibly upon further presses. | [
"Handle",
"the",
"user",
"pressing",
"Ctrl",
"-",
"C",
"by",
"stopping",
"tasks",
"nicely",
"at",
"first",
"then",
"forcibly",
"upon",
"further",
"presses",
"."
] | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L286-L302 |
jreese/tasky | tasky/loop.py | Tasky.sigterm | def sigterm(self) -> None:
'''Handle SIGTERM from the system by stopping tasks gracefully.
Repeated signals will be ignored while waiting for tasks to finish.'''
if self.stop_attempts < 1:
Log.info('received SIGTERM, gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
else:
Log.info('received SIGTERM, bravely waiting for tasks') | python | def sigterm(self) -> None:
'''Handle SIGTERM from the system by stopping tasks gracefully.
Repeated signals will be ignored while waiting for tasks to finish.'''
if self.stop_attempts < 1:
Log.info('received SIGTERM, gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
else:
Log.info('received SIGTERM, bravely waiting for tasks') | [
"def",
"sigterm",
"(",
"self",
")",
"->",
"None",
":",
"if",
"self",
".",
"stop_attempts",
"<",
"1",
":",
"Log",
".",
"info",
"(",
"'received SIGTERM, gracefully stopping tasks'",
")",
"self",
".",
"stop_attempts",
"+=",
"1",
"self",
".",
"terminate",
"(",
")",
"else",
":",
"Log",
".",
"info",
"(",
"'received SIGTERM, bravely waiting for tasks'",
")"
] | Handle SIGTERM from the system by stopping tasks gracefully.
Repeated signals will be ignored while waiting for tasks to finish. | [
"Handle",
"SIGTERM",
"from",
"the",
"system",
"by",
"stopping",
"tasks",
"gracefully",
".",
"Repeated",
"signals",
"will",
"be",
"ignored",
"while",
"waiting",
"for",
"tasks",
"to",
"finish",
"."
] | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L304-L314 |
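
Both handlers take no arguments beyond self, matching loop.add_signal_handler's callback protocol (Unix only). A registration sketch; the library presumably does this itself, which is not shown here, and tasky names an existing Tasky instance:

import signal

loop.add_signal_handler(signal.SIGINT, tasky.sigint)
loop.add_signal_handler(signal.SIGTERM, tasky.sigterm)
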
inspirehep/inspire-query-parser | inspire_query_parser/parser.py | CaseInsensitiveKeyword.parse | def parse(cls, parser, text, pos):
"""Checks if terminal token is a keyword after lower-casing it."""
match = cls.regex.match(text)
if match:
# Check if match is not in the grammar of the specific keyword class.
if match.group(0).lower() not in cls.grammar:
result = text, SyntaxError(repr(match.group(0)) + " is not a member of " + repr(cls.grammar))
else:
result = text[len(match.group(0)):], cls(match.group(0))
else:
result = text, SyntaxError("expecting " + repr(cls.__name__))
return result | python | def parse(cls, parser, text, pos):
"""Checks if terminal token is a keyword after lower-casing it."""
match = cls.regex.match(text)
if match:
# Check if match is not in the grammar of the specific keyword class.
if match.group(0).lower() not in cls.grammar:
result = text, SyntaxError(repr(match.group(0)) + " is not a member of " + repr(cls.grammar))
else:
result = text[len(match.group(0)):], cls(match.group(0))
else:
result = text, SyntaxError("expecting " + repr(cls.__name__))
return result | [
"def",
"parse",
"(",
"cls",
",",
"parser",
",",
"text",
",",
"pos",
")",
":",
"match",
"=",
"cls",
".",
"regex",
".",
"match",
"(",
"text",
")",
"if",
"match",
":",
"# Check if match is is not in the grammar of the specific keyword class.",
"if",
"match",
".",
"group",
"(",
"0",
")",
".",
"lower",
"(",
")",
"not",
"in",
"cls",
".",
"grammar",
":",
"result",
"=",
"text",
",",
"SyntaxError",
"(",
"repr",
"(",
"match",
".",
"group",
"(",
"0",
")",
")",
"+",
"\" is not a member of \"",
"+",
"repr",
"(",
"cls",
".",
"grammar",
")",
")",
"else",
":",
"result",
"=",
"text",
"[",
"len",
"(",
"match",
".",
"group",
"(",
"0",
")",
")",
":",
"]",
",",
"cls",
"(",
"match",
".",
"group",
"(",
"0",
")",
")",
"else",
":",
"result",
"=",
"text",
",",
"SyntaxError",
"(",
"\"expecting \"",
"+",
"repr",
"(",
"cls",
".",
"__name__",
")",
")",
"return",
"result"
] | Checks if terminal token is a keyword after lower-casing it. | [
"Checks",
"if",
"terminal",
"token",
"is",
"a",
"keyword",
"after",
"lower",
"-",
"casing",
"it",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/parser.py#L59-L70 |
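
The core of the case-insensitive membership check, isolated into a runnable sketch; grammar and regex stand in for the pypeg2 class attributes, and the returned SyntaxError mirrors the parser convention of returning errors instead of raising them:

import re

grammar = {'and', 'or', 'not'}    # assumed keyword set
regex = re.compile(r'[A-Za-z]+')  # assumed terminal pattern

def ci_parse(text):
    match = regex.match(text)
    if not match:
        return text, SyntaxError('expecting a keyword')
    if match.group(0).lower() not in grammar:
        return text, SyntaxError(repr(match.group(0)) + ' is not a member of ' + repr(grammar))
    # Consume the matched token and hand back the remainder.
    return text[len(match.group(0)):], match.group(0)

print(ci_parse('AND muon'))  # (' muon', 'AND')
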
inspirehep/inspire-query-parser | inspire_query_parser/parser.py | InspireKeyword.parse | def parse(cls, parser, text, pos):
"""Parse InspireKeyword.
If the keyword is `texkey`, enable the parsing texkey expression flag, since its value contains ':' which
normally isn't allowed.
"""
try:
remaining_text, keyword = parser.parse(text, cls.grammar)
if keyword.lower() == 'texkey':
parser._parsing_texkey_expression = True
return remaining_text, InspireKeyword(keyword)
except SyntaxError as e:
parser._parsing_texkey_expression = False
return text, e | python | def parse(cls, parser, text, pos):
"""Parse InspireKeyword.
If the keyword is `texkey`, enable the parsing texkey expression flag, since its value contains ':' which
normally isn't allowed.
"""
try:
remaining_text, keyword = parser.parse(text, cls.grammar)
if keyword.lower() == 'texkey':
parser._parsing_texkey_expression = True
return remaining_text, InspireKeyword(keyword)
except SyntaxError as e:
parser._parsing_texkey_expression = False
return text, e | [
"def",
"parse",
"(",
"cls",
",",
"parser",
",",
"text",
",",
"pos",
")",
":",
"try",
":",
"remaining_text",
",",
"keyword",
"=",
"parser",
".",
"parse",
"(",
"text",
",",
"cls",
".",
"grammar",
")",
"if",
"keyword",
".",
"lower",
"(",
")",
"==",
"'texkey'",
":",
"parser",
".",
"_parsing_texkey_expression",
"=",
"True",
"return",
"remaining_text",
",",
"InspireKeyword",
"(",
"keyword",
")",
"except",
"SyntaxError",
"as",
"e",
":",
"parser",
".",
"_parsing_texkey_expression",
"=",
"False",
"return",
"text",
",",
"e"
] | Parse InspireKeyword.
If the keyword is `texkey`, enable the parsing texkey expression flag, since its value contains ':' which
normally isn't allowed. | [
"Parse",
"InspireKeyword",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/parser.py#L209-L222 |
inspirehep/inspire-query-parser | inspire_query_parser/parser.py | SimpleValueUnit.parse_terminal_token | def parse_terminal_token(cls, parser, text):
"""Parses a terminal token that doesn't contain parentheses nor colon symbol.
Note:
Handles a special case of tokens where a ':' is needed (for `texkey` queries).
If we're parsing text not in parentheses, then some DSL keywords (e.g. And, Or, Not, defined above) should
not be recognized as terminals, thus we check if they are in the Keywords table (namespace like structure
handled by PyPeg).
This is done only when we are not parsing a parenthesized SimpleValue.
Also, helps in supporting more implicit-and queries cases (last two checks).
"""
token_regex = cls.token_regex
if parser._parsing_texkey_expression:
token_regex = cls.texkey_token_regex
parser._parsing_texkey_expression = False
match = token_regex.match(text)
if match:
matched_token = match.group(0)
# Check if token is a DSL keyword. Disable this check in the case where the parser isn't parsing a
# parenthesized terminal.
if not parser._parsing_parenthesized_terminal and matched_token.lower() in Keyword.table:
return text, SyntaxError("found DSL keyword: " + matched_token)
remaining_text = text[len(matched_token):]
# Attempt to recognize whether current terminal is followed by a ":", which definitely signifies that
# we are parsing a keyword, and we shouldn't.
if cls.starts_with_colon.match(remaining_text):
return text, \
SyntaxError("parsing a keyword (token followed by \":\"): \"" + repr(matched_token) + "\"")
# Attempt to recognize whether current terminal is a non shortened version of Inspire keywords. This is
# done for supporting implicit-and in case of SPIRES style keyword queries. Using the non shortened version
# of the keywords, makes this recognition not eager.
if not parser._parsing_parenthesized_simple_values_expression \
and matched_token in INSPIRE_KEYWORDS_SET:
return text, SyntaxError("parsing a keyword (non shortened INSPIRE keyword)")
result = remaining_text, matched_token
else:
result = text, SyntaxError("expecting match on " + repr(cls.token_regex.pattern))
return result | python | def parse_terminal_token(cls, parser, text):
"""Parses a terminal token that doesn't contain parentheses nor colon symbol.
Note:
Handles a special case of tokens where a ':' is needed (for `texkey` queries).
If we're parsing text not in parentheses, then some DSL keywords (e.g. And, Or, Not, defined above) should
not be recognized as terminals, thus we check if they are in the Keywords table (namespace like structure
handled by PyPeg).
This is done only when we are not parsing a parenthesized SimpleValue.
Also, helps in supporting more implicit-and queries cases (last two checks).
"""
token_regex = cls.token_regex
if parser._parsing_texkey_expression:
token_regex = cls.texkey_token_regex
parser._parsing_texkey_expression = False
match = token_regex.match(text)
if match:
matched_token = match.group(0)
# Check if token is a DSL keyword. Disable this check in the case where the parser isn't parsing a
# parenthesized terminal.
if not parser._parsing_parenthesized_terminal and matched_token.lower() in Keyword.table:
return text, SyntaxError("found DSL keyword: " + matched_token)
remaining_text = text[len(matched_token):]
# Attempt to recognize whether current terminal is followed by a ":", which definitely signifies that
# we are parsing a keyword, and we shouldn't.
if cls.starts_with_colon.match(remaining_text):
return text, \
SyntaxError("parsing a keyword (token followed by \":\"): \"" + repr(matched_token) + "\"")
# Attempt to recognize whether current terminal is a non shortened version of Inspire keywords. This is
# done for supporting implicit-and in case of SPIRES style keyword queries. Using the non shortened version
# of the keywords, makes this recognition not eager.
if not parser._parsing_parenthesized_simple_values_expression \
and matched_token in INSPIRE_KEYWORDS_SET:
return text, SyntaxError("parsing a keyword (non shortened INSPIRE keyword)")
result = remaining_text, matched_token
else:
result = text, SyntaxError("expecting match on " + repr(cls.token_regex.pattern))
return result | [
"def",
"parse_terminal_token",
"(",
"cls",
",",
"parser",
",",
"text",
")",
":",
"token_regex",
"=",
"cls",
".",
"token_regex",
"if",
"parser",
".",
"_parsing_texkey_expression",
":",
"token_regex",
"=",
"cls",
".",
"texkey_token_regex",
"parser",
".",
"_parsing_texkey_expression",
"=",
"False",
"match",
"=",
"token_regex",
".",
"match",
"(",
"text",
")",
"if",
"match",
":",
"matched_token",
"=",
"match",
".",
"group",
"(",
"0",
")",
"# Check if token is a DSL keyword. Disable this check in the case where the parser isn't parsing a",
"# parenthesized terminal.",
"if",
"not",
"parser",
".",
"_parsing_parenthesized_terminal",
"and",
"matched_token",
".",
"lower",
"(",
")",
"in",
"Keyword",
".",
"table",
":",
"return",
"text",
",",
"SyntaxError",
"(",
"\"found DSL keyword: \"",
"+",
"matched_token",
")",
"remaining_text",
"=",
"text",
"[",
"len",
"(",
"matched_token",
")",
":",
"]",
"# Attempt to recognize whether current terminal is followed by a \":\", which definitely signifies that",
"# we are parsing a keyword, and we shouldn't.",
"if",
"cls",
".",
"starts_with_colon",
".",
"match",
"(",
"remaining_text",
")",
":",
"return",
"text",
",",
"SyntaxError",
"(",
"\"parsing a keyword (token followed by \\\":\\\"): \\\"\"",
"+",
"repr",
"(",
"matched_token",
")",
"+",
"\"\\\"\"",
")",
"# Attempt to recognize whether current terminal is a non shortened version of Inspire keywords. This is",
"# done for supporting implicit-and in case of SPIRES style keyword queries. Using the non shortened version",
"# of the keywords, makes this recognition not eager.",
"if",
"not",
"parser",
".",
"_parsing_parenthesized_simple_values_expression",
"and",
"matched_token",
"in",
"INSPIRE_KEYWORDS_SET",
":",
"return",
"text",
",",
"SyntaxError",
"(",
"\"parsing a keyword (non shortened INSPIRE keyword)\"",
")",
"result",
"=",
"remaining_text",
",",
"matched_token",
"else",
":",
"result",
"=",
"text",
",",
"SyntaxError",
"(",
"\"expecting match on \"",
"+",
"repr",
"(",
"cls",
".",
"token_regex",
".",
"pattern",
")",
")",
"return",
"result"
] | Parses a terminal token that doesn't contain parentheses or a colon symbol.
Note:
Handles a special case of tokens where a ':' is needed (for `texkey` queries).
If we're parsing text not in parentheses, then some DSL keywords (e.g. And, Or, Not, defined above) should
not be recognized as terminals, thus we check if they are in the Keywords table (a namespace-like structure
handled by PyPeg).
This is done only when we are not parsing a parenthesized SimpleValue.
Also, this helps in supporting more implicit-and query cases (last two checks). | [
"Parses",
"a",
"terminal",
"token",
"that",
"doesn",
"t",
"contain",
"parentheses",
"nor",
"colon",
"symbol",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/parser.py#L255-L300 |
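A minimal standalone sketch of the two rejection checks described above; the token pattern, the colon lookahead and the keyword table below are simplified assumptions, not the grammar's real token_regex, starts_with_colon or Keyword.table:
import re

TOKEN_REGEX = re.compile(r"[^\s:()]+")   # assumed, simpler than the real pattern
STARTS_WITH_COLON = re.compile(r"\s*:")
DSL_KEYWORDS = {'and', 'or', 'not'}      # stand-in for Keyword.table

def try_terminal(text):
    match = TOKEN_REGEX.match(text)
    if not match:
        return text, SyntaxError('no terminal token')
    token = match.group(0)
    if token.lower() in DSL_KEYWORDS:
        return text, SyntaxError('found DSL keyword: ' + token)
    remaining = text[len(token):]
    # A ':' right after the token means we actually grabbed a keyword, so back off.
    if STARTS_WITH_COLON.match(remaining):
        return text, SyntaxError('token followed by ":"')
    return remaining, token

print(try_terminal('boson decay'))   # (' decay', 'boson')
print(try_terminal('title: boson'))  # original text plus a SyntaxError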
inspirehep/inspire-query-parser | inspire_query_parser/parser.py | SimpleValueUnit.parse | def parse(cls, parser, text, pos):
"""Imitates parsing a list grammar.
Specifically, this
grammar = [
SimpleValueUnit.date_specifiers_regex,
SimpleValueUnit.arxiv_token_regex,
SimpleValueUnit.token_regex,
SimpleValueUnit.parenthesized_token_grammar
].
Parses plaintext which matches date specifiers or arxiv_identifier syntax, or is comprised of either 1) simple
terminal (no parentheses) or 2) a parenthesized SimpleValue.
For example, "e(+)" will be parsed in two steps, first, "e" token will be recognized and then "(+)", as a
parenthesized SimpleValue.
"""
found = False
# Attempt to parse date specifier
match = cls.date_specifiers_regex.match(text)
if match:
remaining_text, token, found = text[len(match.group(0)):], match.group(0), True
else:
# Attempt to parse arxiv identifier
match = cls.arxiv_token_regex.match(text)
if match:
remaining_text, token, found = text[len(match.group()):], match.group(2), True
else:
# Attempt to parse a terminal token
remaining_text, token = SimpleValueUnit.parse_terminal_token(parser, text)
if type(token) != SyntaxError:
found = True
else:
# Attempt to parse a terminal with parentheses
try:
# Enable parsing a parenthesized terminal so that we can accept {+, -, |} as terminals.
parser._parsing_parenthesized_terminal = True
remaining_text, token = parser.parse(text, cls.parenthesized_token_grammar, pos)
found = True
except SyntaxError:
pass
except GrammarValueError:
raise
except ValueError:
pass
finally:
parser._parsing_parenthesized_terminal = False
if found:
result = remaining_text, SimpleValueUnit(token)
else:
result = text, SyntaxError("expecting match on " + cls.__name__)
return result | python | def parse(cls, parser, text, pos):
"""Imitates parsing a list grammar.
Specifically, this
grammar = [
SimpleValueUnit.date_specifiers_regex,
SimpleValueUnit.arxiv_token_regex,
SimpleValueUnit.token_regex,
SimpleValueUnit.parenthesized_token_grammar
].
Parses plaintext which matches date specifiers or arxiv_identifier syntax, or is comprised of either 1) simple
terminal (no parentheses) or 2) a parenthesized SimpleValue.
For example, "e(+)" will be parsed in two steps, first, "e" token will be recognized and then "(+)", as a
parenthesized SimpleValue.
"""
found = False
# Attempt to parse date specifier
match = cls.date_specifiers_regex.match(text)
if match:
remaining_text, token, found = text[len(match.group(0)):], match.group(0), True
else:
# Attempt to parse arxiv identifier
match = cls.arxiv_token_regex.match(text)
if match:
remaining_text, token, found = text[len(match.group()):], match.group(2), True
else:
# Attempt to parse a terminal token
remaining_text, token = SimpleValueUnit.parse_terminal_token(parser, text)
if type(token) != SyntaxError:
found = True
else:
# Attempt to parse a terminal with parentheses
try:
# Enable parsing a parenthesized terminal so that we can accept {+, -, |} as terminals.
parser._parsing_parenthesized_terminal = True
remaining_text, token = parser.parse(text, cls.parenthesized_token_grammar, pos)
found = True
except SyntaxError:
pass
except GrammarValueError:
raise
except ValueError:
pass
finally:
parser._parsing_parenthesized_terminal = False
if found:
result = remaining_text, SimpleValueUnit(token)
else:
result = text, SyntaxError("expecting match on " + cls.__name__)
return result | [
"def",
"parse",
"(",
"cls",
",",
"parser",
",",
"text",
",",
"pos",
")",
":",
"found",
"=",
"False",
"# Attempt to parse date specifier",
"match",
"=",
"cls",
".",
"date_specifiers_regex",
".",
"match",
"(",
"text",
")",
"if",
"match",
":",
"remaining_text",
",",
"token",
",",
"found",
"=",
"text",
"[",
"len",
"(",
"match",
".",
"group",
"(",
"0",
")",
")",
":",
"]",
",",
"match",
".",
"group",
"(",
"0",
")",
",",
"True",
"else",
":",
"# Attempt to parse arxiv identifier",
"match",
"=",
"cls",
".",
"arxiv_token_regex",
".",
"match",
"(",
"text",
")",
"if",
"match",
":",
"remaining_text",
",",
"token",
",",
"found",
"=",
"text",
"[",
"len",
"(",
"match",
".",
"group",
"(",
")",
")",
":",
"]",
",",
"match",
".",
"group",
"(",
"2",
")",
",",
"True",
"else",
":",
"# Attempt to parse a terminal token",
"remaining_text",
",",
"token",
"=",
"SimpleValueUnit",
".",
"parse_terminal_token",
"(",
"parser",
",",
"text",
")",
"if",
"type",
"(",
"token",
")",
"!=",
"SyntaxError",
":",
"found",
"=",
"True",
"else",
":",
"# Attempt to parse a terminal with parentheses",
"try",
":",
"# Enable parsing a parenthesized terminal so that we can accept {+, -, |} as terminals.",
"parser",
".",
"_parsing_parenthesized_terminal",
"=",
"True",
"remaining_text",
",",
"token",
"=",
"parser",
".",
"parse",
"(",
"text",
",",
"cls",
".",
"parenthesized_token_grammar",
",",
"pos",
")",
"found",
"=",
"True",
"except",
"SyntaxError",
":",
"pass",
"except",
"GrammarValueError",
":",
"raise",
"except",
"ValueError",
":",
"pass",
"finally",
":",
"parser",
".",
"_parsing_parenthesized_terminal",
"=",
"False",
"if",
"found",
":",
"result",
"=",
"remaining_text",
",",
"SimpleValueUnit",
"(",
"token",
")",
"else",
":",
"result",
"=",
"text",
",",
"SyntaxError",
"(",
"\"expecting match on \"",
"+",
"cls",
".",
"__name__",
")",
"return",
"result"
] | Imitates parsing a list grammar.
Specifically, this
grammar = [
SimpleValueUnit.date_specifiers_regex,
SimpleValueUnit.arxiv_token_regex,
SimpleValueUnit.token_regex,
SimpleValueUnit.parenthesized_token_grammar
].
Parses plaintext which matches date specifiers or arxiv_identifier syntax, or is comprised of either 1) simple
terminal (no parentheses) or 2) a parenthesized SimpleValue.
For example, "e(+)" will be parsed in two steps, first, "e" token will be recognized and then "(+)", as a
parenthesized SimpleValue. | [
"Imitates",
"parsing",
"a",
"list",
"grammar",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/parser.py#L303-L358 |
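The ordered-alternatives behaviour this method imitates can be sketched with plain regexes; the three patterns are deliberately simplified assumptions rather than the class's real date_specifiers_regex, arxiv_token_regex and token_regex, and parenthesized terminals are omitted:
import re

ALTERNATIVES = [
    ('date', re.compile(r'today|yesterday')),
    ('arxiv', re.compile(r'arxiv:(\d{4}\.\d{4,5})', re.IGNORECASE)),
    ('token', re.compile(r'[^\s:()]+')),
]

def parse_simple_value_unit(text):
    # Try each alternative in order; the first match wins, as in the list grammar.
    for kind, regex in ALTERNATIVES:
        match = regex.match(text)
        if match:
            token = match.group(match.lastindex or 0)
            return text[len(match.group(0)):], (kind, token)
    return text, SyntaxError('expecting match on SimpleValueUnit')

print(parse_simple_value_unit('arXiv:1607.06746 boson'))
# (' boson', ('arxiv', '1607.06746'))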
inspirehep/inspire-query-parser | inspire_query_parser/parser.py | SimpleValue.unconsume_and_reconstruct_input | def unconsume_and_reconstruct_input(remaining_text, recognized_tokens, complex_value_idx):
"""Reconstruct input in case of consuming a keyword query or a value query with ComplexValue as value.
Un-consuming at most 3 elements and specifically (Keyword,) Whitespace and ComplexValue, while also
reconstructing the parser's input text.
Example:
Given this query "author foo t 'bar'", r would be:
r = [SimpleValueUnit("foo"), Whitespace(" "), SimpleValueUnit("t"), Whitespace(" "),
SimpleValueUnit("'bar'")]
thus after this method, r would be [SimpleValueUnit("foo"), Whitespace(" ")], while initial text will
have been reconstructed as "t 'bar' rest_of_the_text".
"""
# Default slicing index: i.e. at most 3 elements will be unconsumed, Keyword, Whitespace and ComplexValue.
slicing_start_idx = 2
# Check whether the 3rd element from the end is an InspireKeyword. If not, a Value query with ComplexValue
# was consumed.
if not INSPIRE_PARSER_KEYWORDS.get(recognized_tokens[complex_value_idx - slicing_start_idx].value, None):
slicing_start_idx = 1
reconstructed_terminals = recognized_tokens[:complex_value_idx - slicing_start_idx]
reconstructed_text = '{} {}'.format(
''.join([token.value for token in recognized_tokens[complex_value_idx - slicing_start_idx:]]),
remaining_text
)
return reconstructed_text, reconstructed_terminals | python | def unconsume_and_reconstruct_input(remaining_text, recognized_tokens, complex_value_idx):
"""Reconstruct input in case of consuming a keyword query or a value query with ComplexValue as value.
Un-consuming at most 3 elements and specifically (Keyword,) Whitespace and ComplexValue, while also
reconstructing the parser's input text.
Example:
Given this query "author foo t 'bar'", r would be:
r = [SimpleValueUnit("foo"), Whitespace(" "), SimpleValueUnit("t"), Whitespace(" "),
SimpleValueUnit("'bar'")]
thus after this method, r would be [SimpleValueUnit("foo"), Whitespace(" ")], while initial text will
have been reconstructed as "t 'bar' rest_of_the_text".
"""
# Default slicing index: i.e. at most 3 elements will be unconsumed, Keyword, Whitespace and ComplexValue.
slicing_start_idx = 2
# Check whether the 3rd element from the end is an InspireKeyword. If not, a Value query with ComplexValue
# was consumed.
if not INSPIRE_PARSER_KEYWORDS.get(recognized_tokens[complex_value_idx - slicing_start_idx].value, None):
slicing_start_idx = 1
reconstructed_terminals = recognized_tokens[:complex_value_idx - slicing_start_idx]
reconstructed_text = '{} {}'.format(
''.join([token.value for token in recognized_tokens[complex_value_idx - slicing_start_idx:]]),
remaining_text
)
return reconstructed_text, reconstructed_terminals | [
"def",
"unconsume_and_reconstruct_input",
"(",
"remaining_text",
",",
"recognized_tokens",
",",
"complex_value_idx",
")",
":",
"# Default slicing index: i.e. at most 3 elements will be unconsumed, Keyword, Whitespace and ComplexValue.",
"slicing_start_idx",
"=",
"2",
"# Check whether the 3rd element from the end is an InspireKeyword. If not, a Value query with ComplexValue",
"# was consumed.",
"if",
"not",
"INSPIRE_PARSER_KEYWORDS",
".",
"get",
"(",
"recognized_tokens",
"[",
"complex_value_idx",
"-",
"slicing_start_idx",
"]",
".",
"value",
",",
"None",
")",
":",
"slicing_start_idx",
"=",
"1",
"reconstructed_terminals",
"=",
"recognized_tokens",
"[",
":",
"complex_value_idx",
"-",
"slicing_start_idx",
"]",
"reconstructed_text",
"=",
"'{} {}'",
".",
"format",
"(",
"''",
".",
"join",
"(",
"[",
"token",
".",
"value",
"for",
"token",
"in",
"recognized_tokens",
"[",
"complex_value_idx",
"-",
"slicing_start_idx",
":",
"]",
"]",
")",
",",
"remaining_text",
")",
"return",
"reconstructed_text",
",",
"reconstructed_terminals"
] | Reconstruct input in case of consuming a keyword query or a value query with ComplexValue as value.
Un-consuming at most 3 elements and specifically (Keyword,) Whitespace and ComplexValue, while also
reconstructing the parser's input text.
Example:
Given this query "author foo t 'bar'", r would be:
r = [SimpleValueUnit("foo"), Whitespace(" "), SimpleValueUnit("t"), Whitespace(" "),
SimpleValueUnit("'bar'")]
thus after this method, r would be [SimpleValueUnit("foo"), Whitespace(" ")], while initial text will
have been reconstructed as "t 'bar' rest_of_the_text". | [
"Reconstruct",
"input",
"in",
"case",
"of",
"consuming",
"a",
"keyword",
"query",
"or",
"a",
"value",
"query",
"with",
"ComplexValue",
"as",
"value",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/parser.py#L379-L405 |
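The un-consume/reconstruct step is easy to replay on plain (kind, value) tuples standing in for the PyPeg token objects; the keyword table here is a tiny assumed subset of INSPIRE_PARSER_KEYWORDS:
INSPIRE_KEYWORDS = {'t': 'title', 'title': 'title'}  # assumed subset of the real table

def unconsume_and_reconstruct(remaining_text, tokens, complex_value_idx):
    # Un-consume (Keyword,) Whitespace and ComplexValue; the keyword only if present.
    start = 2 if tokens[complex_value_idx - 2][1] in INSPIRE_KEYWORDS else 1
    kept = tokens[:complex_value_idx - start]
    put_back = ''.join(value for _, value in tokens[complex_value_idx - start:])
    return '{} {}'.format(put_back, remaining_text), kept

tokens = [('simple', 'foo'), ('ws', ' '), ('simple', 't'),
          ('ws', ' '), ('complex', "'bar'")]
print(unconsume_and_reconstruct('rest_of_the_text', tokens, complex_value_idx=4))
# ("t 'bar' rest_of_the_text", [('simple', 'foo'), ('ws', ' ')])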
inspirehep/inspire-query-parser | inspire_query_parser/parser.py | ParenthesizedSimpleValues.parse | def parse(cls, parser, text, pos):
"""Using our own parse to enable the flag below."""
try:
parser._parsing_parenthesized_simple_values_expression = True
remaining_text, recognized_tokens = parser.parse(text, cls.grammar)
return remaining_text, recognized_tokens
except SyntaxError as e:
return text, e
finally:
parser._parsing_parenthesized_simple_values_expression = False | python | def parse(cls, parser, text, pos):
"""Using our own parse to enable the flag below."""
try:
parser._parsing_parenthesized_simple_values_expression = True
remaining_text, recognized_tokens = parser.parse(text, cls.grammar)
return remaining_text, recognized_tokens
except SyntaxError as e:
return text, e
finally:
parser._parsing_parenthesized_simple_values_expression = False | [
"def",
"parse",
"(",
"cls",
",",
"parser",
",",
"text",
",",
"pos",
")",
":",
"try",
":",
"parser",
".",
"_parsing_parenthesized_simple_values_expression",
"=",
"True",
"remaining_text",
",",
"recognized_tokens",
"=",
"parser",
".",
"parse",
"(",
"text",
",",
"cls",
".",
"grammar",
")",
"return",
"remaining_text",
",",
"recognized_tokens",
"except",
"SyntaxError",
"as",
"e",
":",
"return",
"text",
",",
"e",
"finally",
":",
"parser",
".",
"_parsing_parenthesized_simple_values_expression",
"=",
"False"
] | Using our own parse to enable the flag below. | [
"Using",
"our",
"own",
"parse",
"to",
"enable",
"the",
"flag",
"below",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/parser.py#L534-L543 |
inspirehep/inspire-query-parser | inspire_query_parser/visitors/elastic_search_visitor.py | ElasticSearchVisitor._generate_fieldnames_if_bai_query | def _generate_fieldnames_if_bai_query(self, node_value, bai_field_variation, query_bai_field_if_dots_in_name):
"""Generates new fieldnames in case of BAI query.
Args:
node_value (six.text_type): The node's value (i.e. author name).
bai_field_variation (six.text_type): Which field variation to query ('search' or 'raw').
query_bai_field_if_dots_in_name (bool): Whether to query the BAI field (in addition to the author's name
field) if dots exist in the name and the name contains no whitespace.
Returns:
list: Fieldnames to query on in case of a BAI query, or None otherwise.
Raises:
ValueError, if ``bai_field_variation`` is not one of ('search', 'raw').
"""
if bai_field_variation not in (FieldVariations.search, FieldVariations.raw):
raise ValueError('Non supported field variation "{}".'.format(bai_field_variation))
normalized_author_name = normalize_name(node_value).strip('.')
if ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'] and \
ElasticSearchVisitor.BAI_REGEX.match(node_value):
return [ElasticSearchVisitor.AUTHORS_BAI_FIELD + '.' + bai_field_variation]
elif not whitespace.search(normalized_author_name) and \
query_bai_field_if_dots_in_name and \
ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'] and \
'.' in normalized_author_name:
# Case of partial BAI, e.g. ``J.Smith``.
return [ElasticSearchVisitor.AUTHORS_BAI_FIELD + '.' + bai_field_variation] + \
force_list(ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'])
else:
return None | python | def _generate_fieldnames_if_bai_query(self, node_value, bai_field_variation, query_bai_field_if_dots_in_name):
"""Generates new fieldnames in case of BAI query.
Args:
node_value (six.text_type): The node's value (i.e. author name).
bai_field_variation (six.text_type): Which field variation to query ('search' or 'raw').
query_bai_field_if_dots_in_name (bool): Whether to query the BAI field (in addition to the author's name
field) if dots exist in the name and the name contains no whitespace.
Returns:
list: Fieldnames to query on in case of a BAI query, or None otherwise.
Raises:
ValueError, if ``bai_field_variation`` is not one of ('search', 'raw').
"""
if bai_field_variation not in (FieldVariations.search, FieldVariations.raw):
raise ValueError('Non supported field variation "{}".'.format(bai_field_variation))
normalized_author_name = normalize_name(node_value).strip('.')
if ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'] and \
ElasticSearchVisitor.BAI_REGEX.match(node_value):
return [ElasticSearchVisitor.AUTHORS_BAI_FIELD + '.' + bai_field_variation]
elif not whitespace.search(normalized_author_name) and \
query_bai_field_if_dots_in_name and \
ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'] and \
'.' in normalized_author_name:
# Case of partial BAI, e.g. ``J.Smith``.
return [ElasticSearchVisitor.AUTHORS_BAI_FIELD + '.' + bai_field_variation] + \
force_list(ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'])
else:
return None | [
"def",
"_generate_fieldnames_if_bai_query",
"(",
"self",
",",
"node_value",
",",
"bai_field_variation",
",",
"query_bai_field_if_dots_in_name",
")",
":",
"if",
"bai_field_variation",
"not",
"in",
"(",
"FieldVariations",
".",
"search",
",",
"FieldVariations",
".",
"raw",
")",
":",
"raise",
"ValueError",
"(",
"'Non supported field variation \"{}\".'",
".",
"format",
"(",
"bai_field_variation",
")",
")",
"normalized_author_name",
"=",
"normalize_name",
"(",
"node_value",
")",
".",
"strip",
"(",
"'.'",
")",
"if",
"ElasticSearchVisitor",
".",
"KEYWORD_TO_ES_FIELDNAME",
"[",
"'author'",
"]",
"and",
"ElasticSearchVisitor",
".",
"BAI_REGEX",
".",
"match",
"(",
"node_value",
")",
":",
"return",
"[",
"ElasticSearchVisitor",
".",
"AUTHORS_BAI_FIELD",
"+",
"'.'",
"+",
"bai_field_variation",
"]",
"elif",
"not",
"whitespace",
".",
"search",
"(",
"normalized_author_name",
")",
"and",
"query_bai_field_if_dots_in_name",
"and",
"ElasticSearchVisitor",
".",
"KEYWORD_TO_ES_FIELDNAME",
"[",
"'author'",
"]",
"and",
"'.'",
"in",
"normalized_author_name",
":",
"# Case of partial BAI, e.g. ``J.Smith``.",
"return",
"[",
"ElasticSearchVisitor",
".",
"AUTHORS_BAI_FIELD",
"+",
"'.'",
"+",
"bai_field_variation",
"]",
"+",
"force_list",
"(",
"ElasticSearchVisitor",
".",
"KEYWORD_TO_ES_FIELDNAME",
"[",
"'author'",
"]",
")",
"else",
":",
"return",
"None"
] | Generates new fieldnames in case of BAI query.
Args:
node_value (six.text_type): The node's value (i.e. author name).
bai_field_variation (six.text_type): Which field variation to query ('search' or 'raw').
query_bai_field_if_dots_in_name (bool): Whether to query the BAI field (in addition to the author's name
field) if dots exist in the name and the name contains no whitespace.
Returns:
list: Fieldnames to query on in case of a BAI query, or None otherwise.
Raises:
ValueError, if ``bai_field_variation`` is not one of ('search', 'raw'). | [
"Generates",
"new",
"fieldnames",
"in",
"case",
"of",
"BAI",
"query",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/elastic_search_visitor.py#L156-L189 |
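A rough standalone rendition of the branching above; the BAI pattern and the field paths are illustrative assumptions, not the visitor's real BAI_REGEX, AUTHORS_BAI_FIELD or KEYWORD_TO_ES_FIELDNAME entries:
import re

BAI_REGEX = re.compile(r"^([\w'-]+\.)+\d+$")  # assumed shape, e.g. 'J.A.Smith.1'
WHITESPACE = re.compile(r'\s')

def fieldnames_for(author_value, variation='search'):
    if BAI_REGEX.match(author_value):
        return ['authors.ids.value.' + variation]            # assumed field path
    name = author_value.strip('.')
    if '.' in name and not WHITESPACE.search(name):
        # Partial BAI such as 'J.Smith': query both the BAI and the name field.
        return ['authors.ids.value.' + variation, 'authors.full_name']
    return None

print(fieldnames_for('J.A.Smith.1'))   # ['authors.ids.value.search']
print(fieldnames_for('J.Smith'))       # BAI field plus the full-name field
print(fieldnames_for('Smith, John'))   # None: a plain author name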
inspirehep/inspire-query-parser | inspire_query_parser/visitors/elastic_search_visitor.py | ElasticSearchVisitor._generate_author_query | def _generate_author_query(self, author_name):
"""Generates a query handling specifically authors.
Notes:
The match query is generic enough to return many results. Then, using the filter clause we truncate these
so that we imitate legacy's behaviour on returning more "exact" results. E.g. Searching for `Smith, John`
shouldn't return papers of 'Smith, Bob'.
Additionally, a ``match`` with ``"operator": "and"`` is done in order to be even more exact in our search, by
requiring that the ``full_name`` field contains both the first and the last name.
"""
name_variations = [name_variation.lower()
for name_variation
in generate_minimal_name_variations(author_name)]
# When the query contains sufficient data, i.e. full names, e.g. ``Mele, Salvatore`` (and not ``Mele, S`` or
# ``Mele``) we can improve our filtering in order to filter out results containing records with authors that
# have the same non lastnames prefix, e.g. 'Mele, Samuele'.
if author_name_contains_fullnames(author_name):
specialized_author_filter = [
{
'bool': {
'must': [
{
'term': {ElasticSearchVisitor.AUTHORS_NAME_VARIATIONS_FIELD: names_variation[0]}
},
generate_match_query(
ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'],
names_variation[1],
with_operator_and=True
)
]
}
} for names_variation
in product(name_variations, name_variations)
]
else:
# In the case of initials or even single lastname search, filter with only the name variations.
specialized_author_filter = [
{'term': {ElasticSearchVisitor.AUTHORS_NAME_VARIATIONS_FIELD: name_variation}}
for name_variation in name_variations
]
query = {
'bool': {
'filter': {
'bool': {
'should': specialized_author_filter
}
},
'must': {
'match': {
ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author']: author_name
}
}
}
}
return generate_nested_query(ElasticSearchVisitor.AUTHORS_NESTED_QUERY_PATH, query) | python | def _generate_author_query(self, author_name):
"""Generates a query handling specifically authors.
Notes:
The match query is generic enough to return many results. Then, using the filter clause we truncate these
so that we imitate legacy's behaviour on returning more "exact" results. E.g. Searching for `Smith, John`
shouldn't return papers of 'Smith, Bob'.
Additionally, a ``match`` with ``"operator": "and"`` is done in order to be even more exact in our search, by
requiring that the ``full_name`` field contains both the first and the last name.
"""
name_variations = [name_variation.lower()
for name_variation
in generate_minimal_name_variations(author_name)]
# When the query contains sufficient data, i.e. full names, e.g. ``Mele, Salvatore`` (and not ``Mele, S`` or
# ``Mele``) we can improve our filtering in order to filter out results containing records with authors that
# have the same non lastnames prefix, e.g. 'Mele, Samuele'.
if author_name_contains_fullnames(author_name):
specialized_author_filter = [
{
'bool': {
'must': [
{
'term': {ElasticSearchVisitor.AUTHORS_NAME_VARIATIONS_FIELD: names_variation[0]}
},
generate_match_query(
ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'],
names_variation[1],
with_operator_and=True
)
]
}
} for names_variation
in product(name_variations, name_variations)
]
else:
# In the case of initials or even single lastname search, filter with only the name variations.
specialized_author_filter = [
{'term': {ElasticSearchVisitor.AUTHORS_NAME_VARIATIONS_FIELD: name_variation}}
for name_variation in name_variations
]
query = {
'bool': {
'filter': {
'bool': {
'should': specialized_author_filter
}
},
'must': {
'match': {
ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author']: author_name
}
}
}
}
return generate_nested_query(ElasticSearchVisitor.AUTHORS_NESTED_QUERY_PATH, query) | [
"def",
"_generate_author_query",
"(",
"self",
",",
"author_name",
")",
":",
"name_variations",
"=",
"[",
"name_variation",
".",
"lower",
"(",
")",
"for",
"name_variation",
"in",
"generate_minimal_name_variations",
"(",
"author_name",
")",
"]",
"# When the query contains sufficient data, i.e. full names, e.g. ``Mele, Salvatore`` (and not ``Mele, S`` or",
"# ``Mele``) we can improve our filtering in order to filter out results containing records with authors that",
"# have the same non lastnames prefix, e.g. 'Mele, Samuele'.",
"if",
"author_name_contains_fullnames",
"(",
"author_name",
")",
":",
"specialized_author_filter",
"=",
"[",
"{",
"'bool'",
":",
"{",
"'must'",
":",
"[",
"{",
"'term'",
":",
"{",
"ElasticSearchVisitor",
".",
"AUTHORS_NAME_VARIATIONS_FIELD",
":",
"names_variation",
"[",
"0",
"]",
"}",
"}",
",",
"generate_match_query",
"(",
"ElasticSearchVisitor",
".",
"KEYWORD_TO_ES_FIELDNAME",
"[",
"'author'",
"]",
",",
"names_variation",
"[",
"1",
"]",
",",
"with_operator_and",
"=",
"True",
")",
"]",
"}",
"}",
"for",
"names_variation",
"in",
"product",
"(",
"name_variations",
",",
"name_variations",
")",
"]",
"else",
":",
"# In the case of initials or even single lastname search, filter with only the name variations.",
"specialized_author_filter",
"=",
"[",
"{",
"'term'",
":",
"{",
"ElasticSearchVisitor",
".",
"AUTHORS_NAME_VARIATIONS_FIELD",
":",
"name_variation",
"}",
"}",
"for",
"name_variation",
"in",
"name_variations",
"]",
"query",
"=",
"{",
"'bool'",
":",
"{",
"'filter'",
":",
"{",
"'bool'",
":",
"{",
"'should'",
":",
"specialized_author_filter",
"}",
"}",
",",
"'must'",
":",
"{",
"'match'",
":",
"{",
"ElasticSearchVisitor",
".",
"KEYWORD_TO_ES_FIELDNAME",
"[",
"'author'",
"]",
":",
"author_name",
"}",
"}",
"}",
"}",
"return",
"generate_nested_query",
"(",
"ElasticSearchVisitor",
".",
"AUTHORS_NESTED_QUERY_PATH",
",",
"query",
")"
] | Generates a query handling specifically authors.
Notes:
The match query is generic enough to return many results. Then, using the filter clause we truncate these
so that we imitate legacy's behaviour on returning more "exact" results. E.g. Searching for `Smith, John`
shouldn't return papers of 'Smith, Bob'.
Additionally, a ``match`` with ``"operator": "and"`` is done in order to be even more exact in our search, by
requiring that the ``full_name`` field contains both the first and the last name. | [
"Generates",
"a",
"query",
"handling",
"specifically",
"authors",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/elastic_search_visitor.py#L191-L250 |
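For the initials branch, the final query shape can be reproduced with two small stand-in helpers; the field names and the nested path are assumptions mirroring the class constants, and in the real method the variations come from generate_minimal_name_variations:
import json

def generate_nested_query(path, query):
    return {'nested': {'path': path, 'query': query}}

def author_query(author_name, name_variations):
    # Initials / single-lastname branch: filter only on the name variations.
    filters = [{'term': {'authors.name_variations': v}} for v in name_variations]
    query = {'bool': {
        'filter': {'bool': {'should': filters}},
        'must': {'match': {'authors.full_name': author_name}},
    }}
    return generate_nested_query('authors', query)

print(json.dumps(author_query('Smith, J', ['smith', 'smith, j']), indent=2))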
inspirehep/inspire-query-parser | inspire_query_parser/visitors/elastic_search_visitor.py | ElasticSearchVisitor._generate_exact_author_query | def _generate_exact_author_query(self, author_name_or_bai):
"""Generates a term query handling authors and BAIs.
Notes:
If the given value is a BAI, search for the provided value in the ``search`` field variation of
`ElasticSearchVisitor.AUTHORS_BAI_FIELD`.
Otherwise, the value will be processed in the same way as the indexed value (i.e. lowercased and normalized
with inspire_utils.normalize_name and then NFKC normalization).
E.g. Searching for 'Smith, J.' is the same as searching for: 'Smith, J', 'smith, j.', 'smith j', 'j smith',
'j. smith', 'J Smith', 'J. Smith'.
"""
if ElasticSearchVisitor.BAI_REGEX.match(author_name_or_bai):
bai = author_name_or_bai.lower()
query = self._generate_term_query(
'.'.join((ElasticSearchVisitor.AUTHORS_BAI_FIELD, FieldVariations.search)),
bai
)
else:
author_name = normalize('NFKC', normalize_name(author_name_or_bai)).lower()
query = self._generate_term_query(
ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['exact-author'],
author_name
)
return generate_nested_query(ElasticSearchVisitor.AUTHORS_NESTED_QUERY_PATH, query) | python | def _generate_exact_author_query(self, author_name_or_bai):
"""Generates a term query handling authors and BAIs.
Notes:
If the given value is a BAI, search for the provided value in the ``search`` field variation of
`ElasticSearchVisitor.AUTHORS_BAI_FIELD`.
Otherwise, the value will be processed in the same way as the indexed value (i.e. lowercased and normalized
with inspire_utils.normalize_name and then NFKC normalization).
E.g. Searching for 'Smith, J.' is the same as searching for: 'Smith, J', 'smith, j.', 'smith j', 'j smith',
'j. smith', 'J Smith', 'J. Smith'.
"""
if ElasticSearchVisitor.BAI_REGEX.match(author_name_or_bai):
bai = author_name_or_bai.lower()
query = self._generate_term_query(
'.'.join((ElasticSearchVisitor.AUTHORS_BAI_FIELD, FieldVariations.search)),
bai
)
else:
author_name = normalize('NFKC', normalize_name(author_name_or_bai)).lower()
query = self._generate_term_query(
ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['exact-author'],
author_name
)
return generate_nested_query(ElasticSearchVisitor.AUTHORS_NESTED_QUERY_PATH, query) | [
"def",
"_generate_exact_author_query",
"(",
"self",
",",
"author_name_or_bai",
")",
":",
"if",
"ElasticSearchVisitor",
".",
"BAI_REGEX",
".",
"match",
"(",
"author_name_or_bai",
")",
":",
"bai",
"=",
"author_name_or_bai",
".",
"lower",
"(",
")",
"query",
"=",
"self",
".",
"_generate_term_query",
"(",
"'.'",
".",
"join",
"(",
"(",
"ElasticSearchVisitor",
".",
"AUTHORS_BAI_FIELD",
",",
"FieldVariations",
".",
"search",
")",
")",
",",
"bai",
")",
"else",
":",
"author_name",
"=",
"normalize",
"(",
"'NFKC'",
",",
"normalize_name",
"(",
"author_name_or_bai",
")",
")",
".",
"lower",
"(",
")",
"query",
"=",
"self",
".",
"_generate_term_query",
"(",
"ElasticSearchVisitor",
".",
"KEYWORD_TO_ES_FIELDNAME",
"[",
"'exact-author'",
"]",
",",
"author_name",
")",
"return",
"generate_nested_query",
"(",
"ElasticSearchVisitor",
".",
"AUTHORS_NESTED_QUERY_PATH",
",",
"query",
")"
] | Generates a term query handling authors and BAIs.
Notes:
If the given value is a BAI, search for the provided value in the ``search`` field variation of
`ElasticSearchVisitor.AUTHORS_BAI_FIELD`.
Otherwise, the value will be processed in the same way as the indexed value (i.e. lowercased and normalized
with inspire_utils.normalize_name and then NFKC normalization).
E.g. Searching for 'Smith, J.' is the same as searching for: 'Smith, J', 'smith, j.', 'smith j', 'j smith',
'j. smith', 'J Smith', 'J. Smith'. | [
"Generates",
"a",
"term",
"query",
"handling",
"authors",
"and",
"BAIs",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/elastic_search_visitor.py#L252-L276 |
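A sketch of the normalization chain for the non-BAI branch, with a crude stand-in for inspire_utils' normalize_name (the real helper also reorders 'J Smith'-style names into 'Smith, J', which this sketch does not attempt):
from unicodedata import normalize

def normalize_name_simplified(name):
    # Assumed simplification: collapse whitespace, drop trailing dots per chunk.
    return ' '.join(part.strip('.') for part in name.split())

for raw in ('Smith, J.', 'smith, j.', 'Smith, J'):
    print(normalize('NFKC', normalize_name_simplified(raw)).lower())
# all three print 'smith, j', matching the docstring's equivalence claim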
inspirehep/inspire-query-parser | inspire_query_parser/visitors/elastic_search_visitor.py | ElasticSearchVisitor._generate_date_with_wildcard_query | def _generate_date_with_wildcard_query(self, date_value):
"""Helper for generating a date keyword query containing a wildcard.
Returns:
(dict): The date query containing the wildcard or an empty dict in case the date value is malformed.
The policy followed here is quite conservative on what it accepts as valid input. Look into
:meth:`inspire_query_parser.utils.visitor_utils._truncate_wildcard_from_date` for more information.
"""
if date_value.endswith(ast.GenericValue.WILDCARD_TOKEN):
try:
date_value = _truncate_wildcard_from_date(date_value)
except ValueError:
# Drop date query.
return {}
return self._generate_range_queries(self.KEYWORD_TO_ES_FIELDNAME['date'],
{ES_RANGE_EQ_OPERATOR: date_value})
else:
# Drop date query with wildcard not as suffix, e.g. 2000-1*-31
return {} | python | def _generate_date_with_wildcard_query(self, date_value):
"""Helper for generating a date keyword query containing a wildcard.
Returns:
(dict): The date query containing the wildcard or an empty dict in case the date value is malformed.
The policy followed here is quite conservative on what it accepts as valid input. Look into
:meth:`inspire_query_parser.utils.visitor_utils._truncate_wildcard_from_date` for more information.
"""
if date_value.endswith(ast.GenericValue.WILDCARD_TOKEN):
try:
date_value = _truncate_wildcard_from_date(date_value)
except ValueError:
# Drop date query.
return {}
return self._generate_range_queries(self.KEYWORD_TO_ES_FIELDNAME['date'],
{ES_RANGE_EQ_OPERATOR: date_value})
else:
# Drop date query with wildcard not as suffix, e.g. 2000-1*-31
return {} | [
"def",
"_generate_date_with_wildcard_query",
"(",
"self",
",",
"date_value",
")",
":",
"if",
"date_value",
".",
"endswith",
"(",
"ast",
".",
"GenericValue",
".",
"WILDCARD_TOKEN",
")",
":",
"try",
":",
"date_value",
"=",
"_truncate_wildcard_from_date",
"(",
"date_value",
")",
"except",
"ValueError",
":",
"# Drop date query.",
"return",
"{",
"}",
"return",
"self",
".",
"_generate_range_queries",
"(",
"self",
".",
"KEYWORD_TO_ES_FIELDNAME",
"[",
"'date'",
"]",
",",
"{",
"ES_RANGE_EQ_OPERATOR",
":",
"date_value",
"}",
")",
"else",
":",
"# Drop date query with wildcard not as suffix, e.g. 2000-1*-31",
"return",
"{",
"}"
] | Helper for generating a date keyword query containing a wildcard.
Returns:
(dict): The date query containing the wildcard or an empty dict in case the date value is malformed.
The policy followed here is quite conservative on what it accepts as valid input. Look into
:meth:`inspire_query_parser.utils.visitor_utils._truncate_wildcard_from_date` for more information. | [
"Helper",
"for",
"generating",
"a",
"date",
"keyword",
"query",
"containing",
"a",
"wildcard",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/elastic_search_visitor.py#L278-L298 |
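A sketch of the truncation the method relies on; this is an assumed simplification of _truncate_wildcard_from_date that keeps only the complete leading date parts:
def truncate_wildcard_from_date(date_value):
    parts = date_value.split('-')
    if parts[-1] != '*' or len(parts) == 1:
        # e.g. '201*' or '2000-1*': nothing complete is left to keep.
        raise ValueError('malformed wildcard date: ' + date_value)
    return '-'.join(parts[:-1])

for value in ('2000-10-*', '2000-*', '2000-1*-31'):
    if value.endswith('*'):
        try:
            print(value, '->', truncate_wildcard_from_date(value))
        except ValueError:
            print(value, '-> dropped')
    else:
        print(value, '-> dropped (wildcard is not a suffix)')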
inspirehep/inspire-query-parser | inspire_query_parser/visitors/elastic_search_visitor.py | ElasticSearchVisitor._generate_queries_for_title_symbols | def _generate_queries_for_title_symbols(title_field, query_value):
"""Generate queries for any symbols in the title against the whitespace tokenized field of titles.
Returns:
(dict): The query or queries for the whitespace tokenized field of titles. If no such tokens exist, then
returns an empty dict.
Notes:
Splits the value stream into tokens according to whitespace.
Heuristically identifies the ones that contain symbol-indicating-characters (examples of those tokens are
"g-2", "SU(2)").
"""
values_tokenized_by_whitespace = query_value.split()
symbol_queries = []
for value in values_tokenized_by_whitespace:
# Heuristic: If there's a symbol-indicating-character in the value, it signifies terms that should be
# queried against the whitespace-tokenized title.
if any(character in value for character in ElasticSearchVisitor.TITLE_SYMBOL_INDICATING_CHARACTER):
symbol_queries.append(
generate_match_query(
'.'.join([title_field, FieldVariations.search]),
value,
with_operator_and=False
)
)
return wrap_queries_in_bool_clauses_if_more_than_one(symbol_queries, use_must_clause=True) | python | def _generate_queries_for_title_symbols(title_field, query_value):
"""Generate queries for any symbols in the title against the whitespace tokenized field of titles.
Returns:
(dict): The query or queries for the whitespace tokenized field of titles. If no such tokens exist, then
returns an empty dict.
Notes:
Splits the value stream into tokens according to whitespace.
Heuristically identifies the ones that contain symbol-indicating-characters (examples of those tokens are
"g-2", "SU(2)").
"""
values_tokenized_by_whitespace = query_value.split()
symbol_queries = []
for value in values_tokenized_by_whitespace:
# Heuristic: If there's a symbol-indicating-character in the value, it signifies terms that should be
# queried against the whitespace-tokenized title.
if any(character in value for character in ElasticSearchVisitor.TITLE_SYMBOL_INDICATING_CHARACTER):
symbol_queries.append(
generate_match_query(
'.'.join([title_field, FieldVariations.search]),
value,
with_operator_and=False
)
)
return wrap_queries_in_bool_clauses_if_more_than_one(symbol_queries, use_must_clause=True) | [
"def",
"_generate_queries_for_title_symbols",
"(",
"title_field",
",",
"query_value",
")",
":",
"values_tokenized_by_whitespace",
"=",
"query_value",
".",
"split",
"(",
")",
"symbol_queries",
"=",
"[",
"]",
"for",
"value",
"in",
"values_tokenized_by_whitespace",
":",
"# Heuristic: If there's a symbol-indicating-character in the value, it signifies terms that should be",
"# queried against the whitespace-tokenized title.",
"if",
"any",
"(",
"character",
"in",
"value",
"for",
"character",
"in",
"ElasticSearchVisitor",
".",
"TITLE_SYMBOL_INDICATING_CHARACTER",
")",
":",
"symbol_queries",
".",
"append",
"(",
"generate_match_query",
"(",
"'.'",
".",
"join",
"(",
"[",
"title_field",
",",
"FieldVariations",
".",
"search",
"]",
")",
",",
"value",
",",
"with_operator_and",
"=",
"False",
")",
")",
"return",
"wrap_queries_in_bool_clauses_if_more_than_one",
"(",
"symbol_queries",
",",
"use_must_clause",
"=",
"True",
")"
] | Generate queries for any symbols in the title against the whitespace tokenized field of titles.
Returns:
(dict): The query or queries for the whitespace tokenized field of titles. If no such tokens exist, then
returns an empty dict.
Notes:
Splits the value stream into tokens according to whitespace.
Heuristically identifies the ones that contain symbol-indicating-characters (examples of those tokens are
"g-2", "SU(2)"). | [
"Generate",
"queries",
"for",
"any",
"symbols",
"in",
"the",
"title",
"against",
"the",
"whitespace",
"tokenized",
"field",
"of",
"titles",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/elastic_search_visitor.py#L301-L327 |
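The heuristic itself fits in a few lines; the symbol character set and the field name below are assumptions standing in for TITLE_SYMBOL_INDICATING_CHARACTER and the real whitespace-tokenized title field:
SYMBOL_CHARS = ('-', '(', ')')  # assumed symbol-indicating characters

def title_symbol_queries(query_value):
    queries = [
        {'match': {'titles.full_title.search': word}}
        for word in query_value.split()
        if any(ch in word for ch in SYMBOL_CHARS)
    ]
    if not queries:
        return {}
    return queries[0] if len(queries) == 1 else {'bool': {'must': queries}}

print(title_symbol_queries('measurement of g-2 in SU(2)'))
# bool/must over match queries for 'g-2' and 'SU(2)'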
inspirehep/inspire-query-parser | inspire_query_parser/visitors/elastic_search_visitor.py | ElasticSearchVisitor._generate_type_code_query | def _generate_type_code_query(self, value):
"""Generate type-code queries.
Notes:
If the value of the type-code query exists in ``TYPECODE_VALUE_TO_FIELD_AND_VALUE_PAIRS_MAPPING``, then we
query the specified field, along with the given value according to the mapping.
See: https://github.com/inspirehep/inspire-query-parser/issues/79
Otherwise, we query both ``document_type`` and ``publication_type``.
"""
mapping_for_value = self.TYPECODE_VALUE_TO_FIELD_AND_VALUE_PAIRS_MAPPING.get(value, None)
if mapping_for_value:
return generate_match_query(*mapping_for_value, with_operator_and=True)
else:
return {
'bool': {
'minimum_should_match': 1,
'should': [
generate_match_query('document_type', value, with_operator_and=True),
generate_match_query('publication_type', value, with_operator_and=True),
]
}
} | python | def _generate_type_code_query(self, value):
"""Generate type-code queries.
Notes:
If the value of the type-code query exists in ``TYPECODE_VALUE_TO_FIELD_AND_VALUE_PAIRS_MAPPING``, then we
query the specified field, along with the given value according to the mapping.
See: https://github.com/inspirehep/inspire-query-parser/issues/79
Otherwise, we query both ``document_type`` and ``publication_type``.
"""
mapping_for_value = self.TYPECODE_VALUE_TO_FIELD_AND_VALUE_PAIRS_MAPPING.get(value, None)
if mapping_for_value:
return generate_match_query(*mapping_for_value, with_operator_and=True)
else:
return {
'bool': {
'minimum_should_match': 1,
'should': [
generate_match_query('document_type', value, with_operator_and=True),
generate_match_query('publication_type', value, with_operator_and=True),
]
}
} | [
"def",
"_generate_type_code_query",
"(",
"self",
",",
"value",
")",
":",
"mapping_for_value",
"=",
"self",
".",
"TYPECODE_VALUE_TO_FIELD_AND_VALUE_PAIRS_MAPPING",
".",
"get",
"(",
"value",
",",
"None",
")",
"if",
"mapping_for_value",
":",
"return",
"generate_match_query",
"(",
"*",
"mapping_for_value",
",",
"with_operator_and",
"=",
"True",
")",
"else",
":",
"return",
"{",
"'bool'",
":",
"{",
"'minimum_should_match'",
":",
"1",
",",
"'should'",
":",
"[",
"generate_match_query",
"(",
"'document_type'",
",",
"value",
",",
"with_operator_and",
"=",
"True",
")",
",",
"generate_match_query",
"(",
"'publication_type'",
",",
"value",
",",
"with_operator_and",
"=",
"True",
")",
",",
"]",
"}",
"}"
] | Generate type-code queries.
Notes:
If the value of the type-code query exists in ``TYPECODE_VALUE_TO_FIELD_AND_VALUE_PAIRS_MAPPING``, then we
query the specified field, along with the given value according to the mapping.
See: https://github.com/inspirehep/inspire-query-parser/issues/79
Otherwise, we query both ``document_type`` and ``publication_type``. | [
"Generate",
"type",
"-",
"code",
"queries",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/elastic_search_visitor.py#L339-L361 |
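The fall-through logic reads well as a small dict-driven sketch; the exact match-query shape with "operator": "and" and the sample mapping entry are assumptions:
def match_and(field, value):
    return {'match': {field: {'query': value, 'operator': 'and'}}}

MAPPING = {'lectures': ('publication_type', 'lectures')}  # assumed entry

def type_code_query(value):
    pair = MAPPING.get(value)
    if pair:
        return match_and(*pair)
    return {'bool': {'minimum_should_match': 1, 'should': [
        match_and('document_type', value),
        match_and('publication_type', value),
    ]}}

print(type_code_query('lectures'))  # single match on the mapped field
print(type_code_query('thesis'))    # bool/should over both fields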
inspirehep/inspire-query-parser | inspire_query_parser/visitors/elastic_search_visitor.py | ElasticSearchVisitor._generate_range_queries | def _generate_range_queries(self, fieldnames, operator_value_pairs):
"""Generates ElasticSearch range queries.
Args:
fieldnames (list): The fieldnames that the range query is targeted on.
operator_value_pairs (dict): Contains (range_operator, value) pairs.
The range_operator should be one of those supported by ElasticSearch (e.g. 'gt', 'lt', 'gte', 'lte').
The value should be of type int or string.
Notes:
A bool query with multiple range sub-queries in its ``should`` clause is generated, so that even if one of the
fields is missing from a document, ElasticSearch will be able to match some records.
In the case of a 'date' keyword query, it updates date values after normalizing them by using
:meth:`inspire_query_parser.utils.visitor_utils.update_date_value_in_operator_value_pairs_for_fieldname`.
Additionally, in the aforementioned case, if a malformed date has been given, then the method will
return an empty dictionary.
"""
if ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['date'] == fieldnames:
range_queries = []
for fieldname in fieldnames:
updated_operator_value_pairs = \
update_date_value_in_operator_value_pairs_for_fieldname(fieldname, operator_value_pairs)
if not updated_operator_value_pairs:
break # Malformed date
else:
range_query = {
'range': {
fieldname: updated_operator_value_pairs
}
}
range_queries.append(
generate_nested_query(ElasticSearchVisitor.DATE_NESTED_QUERY_PATH, range_query)
if fieldname in ElasticSearchVisitor.DATE_NESTED_FIELDS
else range_query
)
else:
range_queries = [{
'range': {
fieldname: operator_value_pairs
}
}
for fieldname in fieldnames
]
return wrap_queries_in_bool_clauses_if_more_than_one(range_queries, use_must_clause=False) | python | def _generate_range_queries(self, fieldnames, operator_value_pairs):
"""Generates ElasticSearch range queries.
Args:
fieldnames (list): The fieldnames that the range query is targeted on.
operator_value_pairs (dict): Contains (range_operator, value) pairs.
The range_operator should be one of those supported by ElasticSearch (e.g. 'gt', 'lt', 'gte', 'lte').
The value should be of type int or string.
Notes:
A bool query with multiple range sub-queries in its ``should`` clause is generated, so that even if one of the
fields is missing from a document, ElasticSearch will be able to match some records.
In the case of a 'date' keyword query, it updates date values after normalizing them by using
:meth:`inspire_query_parser.utils.visitor_utils.update_date_value_in_operator_value_pairs_for_fieldname`.
Additionally, in the aforementioned case, if a malformed date has been given, then the method will
return an empty dictionary.
"""
if ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['date'] == fieldnames:
range_queries = []
for fieldname in fieldnames:
updated_operator_value_pairs = \
update_date_value_in_operator_value_pairs_for_fieldname(fieldname, operator_value_pairs)
if not updated_operator_value_pairs:
break # Malformed date
else:
range_query = {
'range': {
fieldname: updated_operator_value_pairs
}
}
range_queries.append(
generate_nested_query(ElasticSearchVisitor.DATE_NESTED_QUERY_PATH, range_query)
if fieldname in ElasticSearchVisitor.DATE_NESTED_FIELDS
else range_query
)
else:
range_queries = [{
'range': {
fieldname: operator_value_pairs
}
}
for fieldname in fieldnames
]
return wrap_queries_in_bool_clauses_if_more_than_one(range_queries, use_must_clause=False) | [
"def",
"_generate_range_queries",
"(",
"self",
",",
"fieldnames",
",",
"operator_value_pairs",
")",
":",
"if",
"ElasticSearchVisitor",
".",
"KEYWORD_TO_ES_FIELDNAME",
"[",
"'date'",
"]",
"==",
"fieldnames",
":",
"range_queries",
"=",
"[",
"]",
"for",
"fieldname",
"in",
"fieldnames",
":",
"updated_operator_value_pairs",
"=",
"update_date_value_in_operator_value_pairs_for_fieldname",
"(",
"fieldname",
",",
"operator_value_pairs",
")",
"if",
"not",
"updated_operator_value_pairs",
":",
"break",
"# Malformed date",
"else",
":",
"range_query",
"=",
"{",
"'range'",
":",
"{",
"fieldname",
":",
"updated_operator_value_pairs",
"}",
"}",
"range_queries",
".",
"append",
"(",
"generate_nested_query",
"(",
"ElasticSearchVisitor",
".",
"DATE_NESTED_QUERY_PATH",
",",
"range_query",
")",
"if",
"fieldname",
"in",
"ElasticSearchVisitor",
".",
"DATE_NESTED_FIELDS",
"else",
"range_query",
")",
"else",
":",
"range_queries",
"=",
"[",
"{",
"'range'",
":",
"{",
"fieldname",
":",
"operator_value_pairs",
"}",
"}",
"for",
"fieldname",
"in",
"fieldnames",
"]",
"return",
"wrap_queries_in_bool_clauses_if_more_than_one",
"(",
"range_queries",
",",
"use_must_clause",
"=",
"False",
")"
] | Generates ElasticSearch range queries.
Args:
fieldnames (list): The fieldnames that the range query is targeted on.
operator_value_pairs (dict): Contains (range_operator, value) pairs.
The range_operator should be one of those supported by ElasticSearch (e.g. 'gt', 'lt', 'gte', 'lte').
The value should be of type int or string.
Notes:
A bool query with multiple range sub-queries in its ``should`` clause is generated, so that even if one of the
fields is missing from a document, ElasticSearch will be able to match some records.
In the case of a 'date' keyword query, it updates date values after normalizing them by using
:meth:`inspire_query_parser.utils.visitor_utils.update_date_value_in_operator_value_pairs_for_fieldname`.
Additionally, in the aforementioned case, if a malformed date has been given, then the method will
return an empty dictionary. | [
"Generates",
"ElasticSearch",
"range",
"queries",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/elastic_search_visitor.py#L411-L458 |
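For the non-date branch, the generated structure is one range clause per fieldname, wrapped in a bool ``should`` when there is more than one; the date fieldnames in the example are illustrative assumptions:
def range_queries(fieldnames, operator_value_pairs):
    queries = [{'range': {field: operator_value_pairs}} for field in fieldnames]
    # should-wrapping: a record matches if any one field satisfies the range.
    return queries[0] if len(queries) == 1 else {'bool': {'should': queries}}

print(range_queries(['earliest_date', 'preprint_date'],
                    {'gte': '2015', 'lt': '2017'}))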
inspirehep/inspire-query-parser | inspire_query_parser/visitors/elastic_search_visitor.py | ElasticSearchVisitor._generate_malformed_query | def _generate_malformed_query(data):
"""Generates a query on the ``_all`` field with all the query content.
Args:
data (six.text_type or list): The query in the format of ``six.text_type`` (when used from the parsing driver)
or ``list`` when used from within the ES visitor.
"""
if isinstance(data, six.text_type):
# Remove colon character (special character for ES)
query_str = data.replace(':', ' ')
else:
query_str = ' '.join([word.strip(':') for word in data.children])
return {
'simple_query_string': {
'fields': ['_all'],
'query': query_str
}
} | python | def _generate_malformed_query(data):
"""Generates a query on the ``_all`` field with all the query content.
Args:
data (six.text_type or list): The query in the format of ``six.text_type`` (when used from the parsing driver)
or ``list`` when used from within the ES visitor.
"""
if isinstance(data, six.text_type):
# Remove colon character (special character for ES)
query_str = data.replace(':', ' ')
else:
query_str = ' '.join([word.strip(':') for word in data.children])
return {
'simple_query_string': {
'fields': ['_all'],
'query': query_str
}
} | [
"def",
"_generate_malformed_query",
"(",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"six",
".",
"text_type",
")",
":",
"# Remove colon character (special character for ES)",
"query_str",
"=",
"data",
".",
"replace",
"(",
"':'",
",",
"' '",
")",
"else",
":",
"query_str",
"=",
"' '",
".",
"join",
"(",
"[",
"word",
".",
"strip",
"(",
"':'",
")",
"for",
"word",
"in",
"data",
".",
"children",
"]",
")",
"return",
"{",
"'simple_query_string'",
":",
"{",
"'fields'",
":",
"[",
"'_all'",
"]",
",",
"'query'",
":",
"query_str",
"}",
"}"
] | Generates a query on the ``_all`` field with all the query content.
Args:
data (six.text_type or list): The query in the format of ``six.text_type`` (when used from the parsing driver)
or ``list`` when used from within the ES visitor. | [
"Generates",
"a",
"query",
"on",
"the",
"_all",
"field",
"with",
"all",
"the",
"query",
"content",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/elastic_search_visitor.py#L461-L479 |
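The fallback query is simple enough to restate directly; colons are stripped because ':' is reserved syntax inside simple_query_string:
def malformed_query(words):
    return {'simple_query_string': {
        'fields': ['_all'],
        'query': ' '.join(word.strip(':') for word in words),
    }}

print(malformed_query(['title:', 'foo', 'and']))
# {'simple_query_string': {'fields': ['_all'], 'query': 'title foo and'}}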
inspirehep/inspire-query-parser | inspire_query_parser/visitors/elastic_search_visitor.py | ElasticSearchVisitor._preprocess_journal_query_value | def _preprocess_journal_query_value(third_journal_field, old_publication_info_values):
"""Transforms the given journal query value (old publication info) to the new one.
Args:
third_journal_field (six.text_type): The final field to be used for populating the old publication info.
old_publication_info_values (six.text_type): The old publication info. It must be one of {only title, title
& volume, title & volume & artid/page_start}.
Returns:
(dict) The new publication info.
"""
# Prepare old publication info for :meth:`inspire_schemas.utils.convert_old_publication_info_to_new`.
publication_info_keys = [
ElasticSearchVisitor.JOURNAL_TITLE,
ElasticSearchVisitor.JOURNAL_VOLUME,
third_journal_field,
]
values_list = [
value.strip()
for value
in old_publication_info_values.split(',')
if value
]
old_publication_info = [
{
key: value
for key, value
in zip(publication_info_keys, values_list)
if value
}
]
# We are always assuming that the returned list will not be empty. In the situation of a journal query with no
# value, a malformed query will be generated instead.
new_publication_info = convert_old_publication_info_to_new(old_publication_info)[0]
return new_publication_info | python | def _preprocess_journal_query_value(third_journal_field, old_publication_info_values):
"""Transforms the given journal query value (old publication info) to the new one.
Args:
third_journal_field (six.text_type): The final field to be used for populating the old publication info.
old_publication_info_values (six.text_type): The old publication info. It must be one of {only title, title
& volume, title & volume & artid/page_start}.
Returns:
(dict): The new publication info.
"""
# Prepare old publication info for :meth:`inspire_schemas.utils.convert_old_publication_info_to_new`.
publication_info_keys = [
ElasticSearchVisitor.JOURNAL_TITLE,
ElasticSearchVisitor.JOURNAL_VOLUME,
third_journal_field,
]
values_list = [
value.strip()
for value
in old_publication_info_values.split(',')
if value
]
old_publication_info = [
{
key: value
for key, value
in zip(publication_info_keys, values_list)
if value
}
]
# We are always assuming that the returned list will not be empty. In the situation of a journal query with no
# value, a malformed query will be generated instead.
new_publication_info = convert_old_publication_info_to_new(old_publication_info)[0]
return new_publication_info | [
"def",
"_preprocess_journal_query_value",
"(",
"third_journal_field",
",",
"old_publication_info_values",
")",
":",
"# Prepare old publication info for :meth:`inspire_schemas.utils.convert_old_publication_info_to_new`.",
"publication_info_keys",
"=",
"[",
"ElasticSearchVisitor",
".",
"JOURNAL_TITLE",
",",
"ElasticSearchVisitor",
".",
"JOURNAL_VOLUME",
",",
"third_journal_field",
",",
"]",
"values_list",
"=",
"[",
"value",
".",
"strip",
"(",
")",
"for",
"value",
"in",
"old_publication_info_values",
".",
"split",
"(",
"','",
")",
"if",
"value",
"]",
"old_publication_info",
"=",
"[",
"{",
"key",
":",
"value",
"for",
"key",
",",
"value",
"in",
"zip",
"(",
"publication_info_keys",
",",
"values_list",
")",
"if",
"value",
"}",
"]",
"# We are always assuming that the returned list will not be empty. In the situation of a journal query with no",
"# value, a malformed query will be generated instead.",
"new_publication_info",
"=",
"convert_old_publication_info_to_new",
"(",
"old_publication_info",
")",
"[",
"0",
"]",
"return",
"new_publication_info"
] | Transforms the given journal query value (old publication info) to the new one.
Args:
third_journal_field (six.text_type): The final field to be used for populating the old publication info.
old_publication_info_values (six.text_type): The old publication info. It must be one of {only title, title
& volume, title & volume & artid/page_start}.
Returns:
(dict): The new publication info. | [
"Transforms",
"the",
"given",
"journal",
"query",
"value",
"(",
"old",
"publication",
"info",
")",
"to",
"the",
"new",
"one",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/elastic_search_visitor.py#L482-L519 |
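The key-zipping step works on its own; this sketch stops before the call to convert_old_publication_info_to_new, so it shows the old-style dict that the schema helper would then translate:
KEYS = ('journal_title', 'journal_volume', 'page_start')

def split_journal_value(value):
    values = [v.strip() for v in value.split(',') if v]
    return {key: val for key, val in zip(KEYS, values) if val}

print(split_journal_value('Phys.Rev.,D92,054005'))
# {'journal_title': 'Phys.Rev.', 'journal_volume': 'D92', 'page_start': '054005'}
print(split_journal_value('Phys.Rev.'))
# {'journal_title': 'Phys.Rev.'}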
inspirehep/inspire-query-parser | inspire_query_parser/visitors/elastic_search_visitor.py | ElasticSearchVisitor._generate_journal_nested_queries | def _generate_journal_nested_queries(self, value):
"""Generates ElasticSearch nested query(s).
Args:
value (string): Contains the journal_title, journal_volume and artid or start_page separated by a comma.
This value should be of type string.
Notes:
The value contains at least one of the 3 mentioned items, in this order and at most 3.
The 3rd is either the artid or the page_start and it will query the corresponding ES field for this item.
The values are then split on comma and stripped of spaces before being saved in a values list in order to
be assigned to corresponding fields.
"""
# Abstract away which is the third field, we care only for its existence.
third_journal_field = ElasticSearchVisitor.JOURNAL_PAGE_START
new_publication_info = ElasticSearchVisitor._preprocess_journal_query_value(third_journal_field, value)
# We always expect a journal title, otherwise query would be considered malformed, and thus this method would
# not have been called.
queries_for_each_field = [
generate_match_query(ElasticSearchVisitor.JOURNAL_FIELDS_MAPPING[ElasticSearchVisitor.JOURNAL_TITLE],
new_publication_info[ElasticSearchVisitor.JOURNAL_TITLE],
with_operator_and=False)
]
if ElasticSearchVisitor.JOURNAL_VOLUME in new_publication_info:
queries_for_each_field.append(
generate_match_query(
ElasticSearchVisitor.JOURNAL_FIELDS_MAPPING[ElasticSearchVisitor.JOURNAL_VOLUME],
new_publication_info[ElasticSearchVisitor.JOURNAL_VOLUME],
with_operator_and=False
)
)
if third_journal_field in new_publication_info:
artid_or_page_start = new_publication_info[third_journal_field]
match_queries = [
generate_match_query(
ElasticSearchVisitor.JOURNAL_FIELDS_MAPPING[third_field],
artid_or_page_start,
with_operator_and=False
)
for third_field
in (ElasticSearchVisitor.JOURNAL_PAGE_START, ElasticSearchVisitor.JOURNAL_ART_ID)
]
queries_for_each_field.append(
wrap_queries_in_bool_clauses_if_more_than_one(match_queries, use_must_clause=False)
)
return generate_nested_query(
ElasticSearchVisitor.JOURNAL_FIELDS_PREFIX,
wrap_queries_in_bool_clauses_if_more_than_one(queries_for_each_field, use_must_clause=True)
) | python | def _generate_journal_nested_queries(self, value):
"""Generates ElasticSearch nested query(s).
Args:
value (string): Contains the journal_title, journal_volume and artid or start_page separated by a comma.
This value should be of type string.
Notes:
The value contains at least one of the 3 mentioned items, in this order and at most 3.
The 3rd is either the artid or the page_start and it will query the corresponding ES field for this item.
The values are then split on comma and stripped of spaces before being saved in a values list in order to
be assigned to corresponding fields.
"""
# Abstract away which is the third field, we care only for its existence.
third_journal_field = ElasticSearchVisitor.JOURNAL_PAGE_START
new_publication_info = ElasticSearchVisitor._preprocess_journal_query_value(third_journal_field, value)
# We always expect a journal title, otherwise query would be considered malformed, and thus this method would
# not have been called.
queries_for_each_field = [
generate_match_query(ElasticSearchVisitor.JOURNAL_FIELDS_MAPPING[ElasticSearchVisitor.JOURNAL_TITLE],
new_publication_info[ElasticSearchVisitor.JOURNAL_TITLE],
with_operator_and=False)
]
if ElasticSearchVisitor.JOURNAL_VOLUME in new_publication_info:
queries_for_each_field.append(
generate_match_query(
ElasticSearchVisitor.JOURNAL_FIELDS_MAPPING[ElasticSearchVisitor.JOURNAL_VOLUME],
new_publication_info[ElasticSearchVisitor.JOURNAL_VOLUME],
with_operator_and=False
)
)
if third_journal_field in new_publication_info:
artid_or_page_start = new_publication_info[third_journal_field]
match_queries = [
generate_match_query(
ElasticSearchVisitor.JOURNAL_FIELDS_MAPPING[third_field],
artid_or_page_start,
with_operator_and=False
)
for third_field
in (ElasticSearchVisitor.JOURNAL_PAGE_START, ElasticSearchVisitor.JOURNAL_ART_ID)
]
queries_for_each_field.append(
wrap_queries_in_bool_clauses_if_more_than_one(match_queries, use_must_clause=False)
)
return generate_nested_query(
ElasticSearchVisitor.JOURNAL_FIELDS_PREFIX,
wrap_queries_in_bool_clauses_if_more_than_one(queries_for_each_field, use_must_clause=True)
) | [
"def",
"_generate_journal_nested_queries",
"(",
"self",
",",
"value",
")",
":",
"# Abstract away which is the third field, we care only for its existence.",
"third_journal_field",
"=",
"ElasticSearchVisitor",
".",
"JOURNAL_PAGE_START",
"new_publication_info",
"=",
"ElasticSearchVisitor",
".",
"_preprocess_journal_query_value",
"(",
"third_journal_field",
",",
"value",
")",
"# We always expect a journal title, otherwise query would be considered malformed, and thus this method would",
"# not have been called.",
"queries_for_each_field",
"=",
"[",
"generate_match_query",
"(",
"ElasticSearchVisitor",
".",
"JOURNAL_FIELDS_MAPPING",
"[",
"ElasticSearchVisitor",
".",
"JOURNAL_TITLE",
"]",
",",
"new_publication_info",
"[",
"ElasticSearchVisitor",
".",
"JOURNAL_TITLE",
"]",
",",
"with_operator_and",
"=",
"False",
")",
"]",
"if",
"ElasticSearchVisitor",
".",
"JOURNAL_VOLUME",
"in",
"new_publication_info",
":",
"queries_for_each_field",
".",
"append",
"(",
"generate_match_query",
"(",
"ElasticSearchVisitor",
".",
"JOURNAL_FIELDS_MAPPING",
"[",
"ElasticSearchVisitor",
".",
"JOURNAL_VOLUME",
"]",
",",
"new_publication_info",
"[",
"ElasticSearchVisitor",
".",
"JOURNAL_VOLUME",
"]",
",",
"with_operator_and",
"=",
"False",
")",
")",
"if",
"third_journal_field",
"in",
"new_publication_info",
":",
"artid_or_page_start",
"=",
"new_publication_info",
"[",
"third_journal_field",
"]",
"match_queries",
"=",
"[",
"generate_match_query",
"(",
"ElasticSearchVisitor",
".",
"JOURNAL_FIELDS_MAPPING",
"[",
"third_field",
"]",
",",
"artid_or_page_start",
",",
"with_operator_and",
"=",
"False",
")",
"for",
"third_field",
"in",
"(",
"ElasticSearchVisitor",
".",
"JOURNAL_PAGE_START",
",",
"ElasticSearchVisitor",
".",
"JOURNAL_ART_ID",
")",
"]",
"queries_for_each_field",
".",
"append",
"(",
"wrap_queries_in_bool_clauses_if_more_than_one",
"(",
"match_queries",
",",
"use_must_clause",
"=",
"False",
")",
")",
"return",
"generate_nested_query",
"(",
"ElasticSearchVisitor",
".",
"JOURNAL_FIELDS_PREFIX",
",",
"wrap_queries_in_bool_clauses_if_more_than_one",
"(",
"queries_for_each_field",
",",
"use_must_clause",
"=",
"True",
")",
")"
] | Generates ElasticSearch nested query(s).
Args:
value (string): Contains the journal_title, journal_volume and artid or start_page separated by a comma.
This value should be of type string.
Notes:
The value contains at least one of the 3 mentioned items, in this order and at most 3.
The 3rd is either the artid or the page_start and it will query the corresponding ES field for this item.
The values are then split on comma and stripped of spaces before being saved in a values list in order to
be assigned to corresponding fields. | [
"Generates",
"ElasticSearch",
"nested",
"query",
"(",
"s",
")",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/elastic_search_visitor.py#L521-L575 |
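The nested bool structure that _generate_journal_nested_queries returns can be reproduced with two small helpers. This is a sketch under stated assumptions: the publication_info.* field paths below stand in for JOURNAL_FIELDS_PREFIX and JOURNAL_FIELDS_MAPPING and may not match the real mapping.

import json

def generate_nested_query(path, query):
    # An Elasticsearch nested query scopes matching to a single object
    # inside the nested array at `path`.
    return {"nested": {"path": path, "query": query}}

def wrap_in_bool(queries, use_must_clause):
    # A single query passes through untouched; several are AND-ed (must)
    # or OR-ed (should).
    if len(queries) == 1:
        return queries[0]
    return {"bool": {("must" if use_must_clause else "should"): queries}}

title = {"match": {"publication_info.journal_title": "Phys.Rev."}}
volume = {"match": {"publication_info.journal_volume": "D42"}}
third = wrap_in_bool([
    {"match": {"publication_info.page_start": "011"}},
    {"match": {"publication_info.artid": "011"}},
], use_must_clause=False)

print(json.dumps(generate_nested_query(
    "publication_info", wrap_in_bool([title, volume, third], use_must_clause=True)
), indent=2))

The third value is OR-ed across page_start and artid because, as the docstring notes, the query cannot tell which of the two the user meant.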
inspirehep/inspire-query-parser | inspire_query_parser/visitors/elastic_search_visitor.py | ElasticSearchVisitor.visit_exact_match_value | def visit_exact_match_value(self, node, fieldnames=None):
"""Generates a term query (exact search in ElasticSearch)."""
if not fieldnames:
fieldnames = ['_all']
else:
fieldnames = force_list(fieldnames)
if ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['exact-author'] == fieldnames[0]:
return self._generate_exact_author_query(node.value)
elif ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['type-code'] == fieldnames[0]:
return self._generate_type_code_query(node.value)
elif ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['journal'] == fieldnames:
return self._generate_journal_nested_queries(node.value)
bai_fieldnames = self._generate_fieldnames_if_bai_query(
node.value,
bai_field_variation=FieldVariations.raw,
query_bai_field_if_dots_in_name=False
)
if ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['date'] == fieldnames:
term_queries = []
for field in fieldnames:
term_query = \
{'term': {field: _truncate_date_value_according_on_date_field(field, node.value).dumps()}}
term_queries.append(
generate_nested_query(ElasticSearchVisitor.DATE_NESTED_QUERY_PATH, term_query)
if field in ElasticSearchVisitor.DATE_NESTED_FIELDS
else term_query
)
elif ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'] in fieldnames:
term_queries = [
generate_nested_query(ElasticSearchVisitor.AUTHORS_NESTED_QUERY_PATH, {'term': {field: node.value}})
for field in (bai_fieldnames or fieldnames)
]
else:
term_queries = [{'term': {field: node.value}} for field in (bai_fieldnames or fieldnames)]
return wrap_queries_in_bool_clauses_if_more_than_one(term_queries, use_must_clause=False) | python | def visit_exact_match_value(self, node, fieldnames=None):
"""Generates a term query (exact search in ElasticSearch)."""
if not fieldnames:
fieldnames = ['_all']
else:
fieldnames = force_list(fieldnames)
if ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['exact-author'] == fieldnames[0]:
return self._generate_exact_author_query(node.value)
elif ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['type-code'] == fieldnames[0]:
return self._generate_type_code_query(node.value)
elif ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['journal'] == fieldnames:
return self._generate_journal_nested_queries(node.value)
bai_fieldnames = self._generate_fieldnames_if_bai_query(
node.value,
bai_field_variation=FieldVariations.raw,
query_bai_field_if_dots_in_name=False
)
if ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['date'] == fieldnames:
term_queries = []
for field in fieldnames:
term_query = \
{'term': {field: _truncate_date_value_according_on_date_field(field, node.value).dumps()}}
term_queries.append(
generate_nested_query(ElasticSearchVisitor.DATE_NESTED_QUERY_PATH, term_query)
if field in ElasticSearchVisitor.DATE_NESTED_FIELDS
else term_query
)
elif ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'] in fieldnames:
term_queries = [
generate_nested_query(ElasticSearchVisitor.AUTHORS_NESTED_QUERY_PATH, {'term': {field: node.value}})
for field in (bai_fieldnames or fieldnames)
]
else:
term_queries = [{'term': {field: node.value}} for field in (bai_fieldnames or fieldnames)]
return wrap_queries_in_bool_clauses_if_more_than_one(term_queries, use_must_clause=False) | [
"def",
"visit_exact_match_value",
"(",
"self",
",",
"node",
",",
"fieldnames",
"=",
"None",
")",
":",
"if",
"not",
"fieldnames",
":",
"fieldnames",
"=",
"[",
"'_all'",
"]",
"else",
":",
"fieldnames",
"=",
"force_list",
"(",
"fieldnames",
")",
"if",
"ElasticSearchVisitor",
".",
"KEYWORD_TO_ES_FIELDNAME",
"[",
"'exact-author'",
"]",
"==",
"fieldnames",
"[",
"0",
"]",
":",
"return",
"self",
".",
"_generate_exact_author_query",
"(",
"node",
".",
"value",
")",
"elif",
"ElasticSearchVisitor",
".",
"KEYWORD_TO_ES_FIELDNAME",
"[",
"'type-code'",
"]",
"==",
"fieldnames",
"[",
"0",
"]",
":",
"return",
"self",
".",
"_generate_type_code_query",
"(",
"node",
".",
"value",
")",
"elif",
"ElasticSearchVisitor",
".",
"KEYWORD_TO_ES_FIELDNAME",
"[",
"'journal'",
"]",
"==",
"fieldnames",
":",
"return",
"self",
".",
"_generate_journal_nested_queries",
"(",
"node",
".",
"value",
")",
"bai_fieldnames",
"=",
"self",
".",
"_generate_fieldnames_if_bai_query",
"(",
"node",
".",
"value",
",",
"bai_field_variation",
"=",
"FieldVariations",
".",
"raw",
",",
"query_bai_field_if_dots_in_name",
"=",
"False",
")",
"if",
"ElasticSearchVisitor",
".",
"KEYWORD_TO_ES_FIELDNAME",
"[",
"'date'",
"]",
"==",
"fieldnames",
":",
"term_queries",
"=",
"[",
"]",
"for",
"field",
"in",
"fieldnames",
":",
"term_query",
"=",
"{",
"'term'",
":",
"{",
"field",
":",
"_truncate_date_value_according_on_date_field",
"(",
"field",
",",
"node",
".",
"value",
")",
".",
"dumps",
"(",
")",
"}",
"}",
"term_queries",
".",
"append",
"(",
"generate_nested_query",
"(",
"ElasticSearchVisitor",
".",
"DATE_NESTED_QUERY_PATH",
",",
"term_query",
")",
"if",
"field",
"in",
"ElasticSearchVisitor",
".",
"DATE_NESTED_FIELDS",
"else",
"term_query",
")",
"elif",
"ElasticSearchVisitor",
".",
"KEYWORD_TO_ES_FIELDNAME",
"[",
"'author'",
"]",
"in",
"fieldnames",
":",
"term_queries",
"=",
"[",
"generate_nested_query",
"(",
"ElasticSearchVisitor",
".",
"AUTHORS_NESTED_QUERY_PATH",
",",
"{",
"'term'",
":",
"{",
"field",
":",
"node",
".",
"value",
"}",
"}",
")",
"for",
"field",
"in",
"(",
"bai_fieldnames",
"or",
"fieldnames",
")",
"]",
"else",
":",
"term_queries",
"=",
"[",
"{",
"'term'",
":",
"{",
"field",
":",
"node",
".",
"value",
"}",
"}",
"for",
"field",
"in",
"(",
"bai_fieldnames",
"or",
"fieldnames",
")",
"]",
"return",
"wrap_queries_in_bool_clauses_if_more_than_one",
"(",
"term_queries",
",",
"use_must_clause",
"=",
"False",
")"
] | Generates a term query (exact search in ElasticSearch). | [
"Generates",
"a",
"term",
"query",
"(",
"exact",
"search",
"in",
"ElasticSearch",
")",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/elastic_search_visitor.py#L753-L794 |
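A hedged sketch of the default branch of visit_exact_match_value: term clauses skip analysis (hence "exact search"), one per field, OR-ed in a bool/should only when more than one field is involved. The field names in the demo calls are illustrative, not taken from KEYWORD_TO_ES_FIELDNAME:

def exact_match(fieldnames, value):
    # One non-analyzed term clause per field; several fields are OR-ed, mirroring
    # wrap_queries_in_bool_clauses_if_more_than_one(..., use_must_clause=False).
    term_queries = [{"term": {field: value}} for field in fieldnames]
    if len(term_queries) == 1:
        return term_queries[0]
    return {"bool": {"should": term_queries}}

print(exact_match(["_all"], "quark"))
print(exact_match(["dois.value", "arxiv_eprints.value"], "10.1000/xyz"))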
inspirehep/inspire-query-parser | inspire_query_parser/visitors/elastic_search_visitor.py | ElasticSearchVisitor.visit_partial_match_value | def visit_partial_match_value(self, node, fieldnames=None):
"""Generates a query which looks for a substring of the node's value in the given fieldname."""
if ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['date'] == fieldnames:
# Date queries with partial values are transformed into range queries, among the given and the exact
# next date, according to the granularity of the given date.
if node.contains_wildcard:
return self._generate_date_with_wildcard_query(node.value)
return self._generate_range_queries(force_list(fieldnames), {ES_RANGE_EQ_OPERATOR: node.value})
if ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['exact-author'] == fieldnames:
return self._generate_exact_author_query(node.value)
elif ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['type-code'] == fieldnames:
return self._generate_type_code_query(node.value)
elif ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['journal'] == fieldnames:
return self._generate_journal_nested_queries(node.value)
# Add wildcard token as prefix and suffix.
value = \
('' if node.value.startswith(ast.GenericValue.WILDCARD_TOKEN) else '*') + \
node.value + \
('' if node.value.endswith(ast.GenericValue.WILDCARD_TOKEN) else '*')
bai_fieldnames = self._generate_fieldnames_if_bai_query(
node.value,
bai_field_variation=FieldVariations.search,
query_bai_field_if_dots_in_name=True
)
query = self._generate_query_string_query(value,
fieldnames=bai_fieldnames or fieldnames,
analyze_wildcard=True)
if (bai_fieldnames and ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'] in bai_fieldnames) \
or (fieldnames and ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'] in fieldnames):
return generate_nested_query(ElasticSearchVisitor.AUTHORS_NESTED_QUERY_PATH, query)
return query | python | def visit_partial_match_value(self, node, fieldnames=None):
"""Generates a query which looks for a substring of the node's value in the given fieldname."""
if ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['date'] == fieldnames:
# Date queries with partial values are transformed into range queries, among the given and the exact
# next date, according to the granularity of the given date.
if node.contains_wildcard:
return self._generate_date_with_wildcard_query(node.value)
return self._generate_range_queries(force_list(fieldnames), {ES_RANGE_EQ_OPERATOR: node.value})
if ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['exact-author'] == fieldnames:
return self._generate_exact_author_query(node.value)
elif ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['type-code'] == fieldnames:
return self._generate_type_code_query(node.value)
elif ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['journal'] == fieldnames:
return self._generate_journal_nested_queries(node.value)
# Add wildcard token as prefix and suffix.
value = \
('' if node.value.startswith(ast.GenericValue.WILDCARD_TOKEN) else '*') + \
node.value + \
('' if node.value.endswith(ast.GenericValue.WILDCARD_TOKEN) else '*')
bai_fieldnames = self._generate_fieldnames_if_bai_query(
node.value,
bai_field_variation=FieldVariations.search,
query_bai_field_if_dots_in_name=True
)
query = self._generate_query_string_query(value,
fieldnames=bai_fieldnames or fieldnames,
analyze_wildcard=True)
if (bai_fieldnames and ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'] in bai_fieldnames) \
or (fieldnames and ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'] in fieldnames):
return generate_nested_query(ElasticSearchVisitor.AUTHORS_NESTED_QUERY_PATH, query)
return query | [
"def",
"visit_partial_match_value",
"(",
"self",
",",
"node",
",",
"fieldnames",
"=",
"None",
")",
":",
"if",
"ElasticSearchVisitor",
".",
"KEYWORD_TO_ES_FIELDNAME",
"[",
"'date'",
"]",
"==",
"fieldnames",
":",
"# Date queries with partial values are transformed into range queries, among the given and the exact",
"# next date, according to the granularity of the given date.",
"if",
"node",
".",
"contains_wildcard",
":",
"return",
"self",
".",
"_generate_date_with_wildcard_query",
"(",
"node",
".",
"value",
")",
"return",
"self",
".",
"_generate_range_queries",
"(",
"force_list",
"(",
"fieldnames",
")",
",",
"{",
"ES_RANGE_EQ_OPERATOR",
":",
"node",
".",
"value",
"}",
")",
"if",
"ElasticSearchVisitor",
".",
"KEYWORD_TO_ES_FIELDNAME",
"[",
"'exact-author'",
"]",
"==",
"fieldnames",
":",
"return",
"self",
".",
"_generate_exact_author_query",
"(",
"node",
".",
"value",
")",
"elif",
"ElasticSearchVisitor",
".",
"KEYWORD_TO_ES_FIELDNAME",
"[",
"'type-code'",
"]",
"==",
"fieldnames",
":",
"return",
"self",
".",
"_generate_type_code_query",
"(",
"node",
".",
"value",
")",
"elif",
"ElasticSearchVisitor",
".",
"KEYWORD_TO_ES_FIELDNAME",
"[",
"'journal'",
"]",
"==",
"fieldnames",
":",
"return",
"self",
".",
"_generate_journal_nested_queries",
"(",
"node",
".",
"value",
")",
"# Add wildcard token as prefix and suffix.",
"value",
"=",
"(",
"''",
"if",
"node",
".",
"value",
".",
"startswith",
"(",
"ast",
".",
"GenericValue",
".",
"WILDCARD_TOKEN",
")",
"else",
"'*'",
")",
"+",
"node",
".",
"value",
"+",
"(",
"''",
"if",
"node",
".",
"value",
".",
"endswith",
"(",
"ast",
".",
"GenericValue",
".",
"WILDCARD_TOKEN",
")",
"else",
"'*'",
")",
"bai_fieldnames",
"=",
"self",
".",
"_generate_fieldnames_if_bai_query",
"(",
"node",
".",
"value",
",",
"bai_field_variation",
"=",
"FieldVariations",
".",
"search",
",",
"query_bai_field_if_dots_in_name",
"=",
"True",
")",
"query",
"=",
"self",
".",
"_generate_query_string_query",
"(",
"value",
",",
"fieldnames",
"=",
"bai_fieldnames",
"or",
"fieldnames",
",",
"analyze_wildcard",
"=",
"True",
")",
"if",
"(",
"bai_fieldnames",
"and",
"ElasticSearchVisitor",
".",
"KEYWORD_TO_ES_FIELDNAME",
"[",
"'author'",
"]",
"in",
"bai_fieldnames",
")",
"or",
"(",
"fieldnames",
"and",
"ElasticSearchVisitor",
".",
"KEYWORD_TO_ES_FIELDNAME",
"[",
"'author'",
"]",
"in",
"fieldnames",
")",
":",
"return",
"generate_nested_query",
"(",
"ElasticSearchVisitor",
".",
"AUTHORS_NESTED_QUERY_PATH",
",",
"query",
")",
"return",
"query"
] | Generates a query which looks for a substring of the node's value in the given fieldname. | [
"Generates",
"a",
"query",
"which",
"looks",
"for",
"a",
"substring",
"of",
"the",
"node",
"s",
"value",
"in",
"the",
"given",
"fieldname",
"."
] | train | https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/elastic_search_visitor.py#L796-L834 |
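The prefix/suffix logic in visit_partial_match_value reduces to a small pure function; WILDCARD_TOKEN is assumed to be "*" here, which is what ast.GenericValue is expected to define:

WILDCARD_TOKEN = "*"  # assumed value of ast.GenericValue.WILDCARD_TOKEN

def wrap_in_wildcards(value):
    # Add '*' only on the sides the user did not already supply one,
    # turning an exact value into a substring pattern for query_string.
    prefix = "" if value.startswith(WILDCARD_TOKEN) else "*"
    suffix = "" if value.endswith(WILDCARD_TOKEN) else "*"
    return prefix + value + suffix

for v in ("ellis", "*ellis", "ellis*", "*ellis*"):
    print(repr(v), "->", repr(wrap_in_wildcards(v)))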
praekeltfoundation/seed-stage-based-messaging | contentstore/tasks.py | QueueSubscriptionSend.run | def run(self, schedule_id, **kwargs):
"""
Arguments:
schedule_id {int} -- The schedule to send messages for
"""
subscriptions = Subscription.objects.filter(
schedule_id=schedule_id, active=True, completed=False, process_status=0
).values("id")
for subscription in subscriptions.iterator():
send_next_message.delay(str(subscription["id"])) | python | def run(self, schedule_id, **kwargs):
"""
Arguments:
schedule_id {int} -- The schedule to send messages for
"""
subscriptions = Subscription.objects.filter(
schedule_id=schedule_id, active=True, completed=False, process_status=0
).values("id")
for subscription in subscriptions.iterator():
send_next_message.delay(str(subscription["id"])) | [
"def",
"run",
"(",
"self",
",",
"schedule_id",
",",
"*",
"*",
"kwargs",
")",
":",
"subscriptions",
"=",
"Subscription",
".",
"objects",
".",
"filter",
"(",
"schedule_id",
"=",
"schedule_id",
",",
"active",
"=",
"True",
",",
"completed",
"=",
"False",
",",
"process_status",
"=",
"0",
")",
".",
"values",
"(",
"\"id\"",
")",
"for",
"subscription",
"in",
"subscriptions",
".",
"iterator",
"(",
")",
":",
"send_next_message",
".",
"delay",
"(",
"str",
"(",
"subscription",
"[",
"\"id\"",
"]",
")",
")"
] | Arguments:
schedule_id {int} -- The schedule to send messages for | [
"Arguments",
":",
"schedule_id",
"{",
"int",
"}",
"--",
"The",
"schedule",
"to",
"send",
"messages",
"for"
] | train | https://github.com/praekeltfoundation/seed-stage-based-messaging/blob/6f0cacf0727ac2ed19877de214d58009c685b8fa/contentstore/tasks.py#L83-L92 |
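QueueSubscriptionSend.run is a fan-out: stream the matching rows and enqueue one Celery task per subscription. Using .values("id") avoids instantiating full model objects and .iterator() keeps results off the queryset cache, so memory stays flat on large schedules. A framework-free stand-in for the dispatch loop:

def queue_subscription_send(subscription_ids, enqueue):
    # `enqueue` stands in for send_next_message.delay: one task per row,
    # so a slow or failing subscription never blocks the others.
    for sub_id in subscription_ids:
        enqueue(str(sub_id))

queue_subscription_send([101, 102, 103], enqueue=lambda i: print("queued", i))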
praekeltfoundation/seed-stage-based-messaging | contentstore/tasks.py | SyncSchedule.run | def run(self, schedule_id, **kwargs):
"""
Synchronises the schedule specified by the ID `schedule_id` to the
scheduler service.
Arguments:
schedule_id {str} -- The ID of the schedule to sync
"""
log = self.get_logger(**kwargs)
try:
schedule = Schedule.objects.get(id=schedule_id)
except Schedule.DoesNotExist:
log.error("Missing Schedule %s", schedule_id, exc_info=True)
if schedule.scheduler_schedule_id is None:
# Create the new schedule
result = self.scheduler.create_schedule(schedule.scheduler_format)
schedule.scheduler_schedule_id = result["id"]
# Disable update signal here to avoid calling twice
post_save.disconnect(schedule_saved, sender=Schedule)
schedule.save(update_fields=("scheduler_schedule_id",))
post_save.connect(schedule_saved, sender=Schedule)
log.info(
"Created schedule %s on scheduler for schedule %s",
schedule.scheduler_schedule_id,
schedule.id,
)
else:
# Update the existing schedule
result = self.scheduler.update_schedule(
str(schedule.scheduler_schedule_id), schedule.scheduler_format
)
log.info(
"Updated schedule %s on scheduler for schedule %s",
schedule.scheduler_schedule_id,
schedule.id,
) | python | def run(self, schedule_id, **kwargs):
"""
Synchronises the schedule specified by the ID `schedule_id` to the
scheduler service.
Arguments:
schedule_id {str} -- The ID of the schedule to sync
"""
log = self.get_logger(**kwargs)
try:
schedule = Schedule.objects.get(id=schedule_id)
except Schedule.DoesNotExist:
log.error("Missing Schedule %s", schedule_id, exc_info=True)
if schedule.scheduler_schedule_id is None:
# Create the new schedule
result = self.scheduler.create_schedule(schedule.scheduler_format)
schedule.scheduler_schedule_id = result["id"]
# Disable update signal here to avoid calling twice
post_save.disconnect(schedule_saved, sender=Schedule)
schedule.save(update_fields=("scheduler_schedule_id",))
post_save.connect(schedule_saved, sender=Schedule)
log.info(
"Created schedule %s on scheduler for schedule %s",
schedule.scheduler_schedule_id,
schedule.id,
)
else:
# Update the existing schedule
result = self.scheduler.update_schedule(
str(schedule.scheduler_schedule_id), schedule.scheduler_format
)
log.info(
"Updated schedule %s on scheduler for schedule %s",
schedule.scheduler_schedule_id,
schedule.id,
) | [
"def",
"run",
"(",
"self",
",",
"schedule_id",
",",
"*",
"*",
"kwargs",
")",
":",
"log",
"=",
"self",
".",
"get_logger",
"(",
"*",
"*",
"kwargs",
")",
"try",
":",
"schedule",
"=",
"Schedule",
".",
"objects",
".",
"get",
"(",
"id",
"=",
"schedule_id",
")",
"except",
"Schedule",
".",
"DoesNotExist",
":",
"log",
".",
"error",
"(",
"\"Missing Schedule %s\"",
",",
"schedule_id",
",",
"exc_info",
"=",
"True",
")",
"if",
"schedule",
".",
"scheduler_schedule_id",
"is",
"None",
":",
"# Create the new schedule",
"result",
"=",
"self",
".",
"scheduler",
".",
"create_schedule",
"(",
"schedule",
".",
"scheduler_format",
")",
"schedule",
".",
"scheduler_schedule_id",
"=",
"result",
"[",
"\"id\"",
"]",
"# Disable update signal here to avoid calling twice",
"post_save",
".",
"disconnect",
"(",
"schedule_saved",
",",
"sender",
"=",
"Schedule",
")",
"schedule",
".",
"save",
"(",
"update_fields",
"=",
"(",
"\"scheduler_schedule_id\"",
",",
")",
")",
"post_save",
".",
"connect",
"(",
"schedule_saved",
",",
"sender",
"=",
"Schedule",
")",
"log",
".",
"info",
"(",
"\"Created schedule %s on scheduler for schedule %s\"",
",",
"schedule",
".",
"scheduler_schedule_id",
",",
"schedule",
".",
"id",
",",
")",
"else",
":",
"# Update the existing schedule",
"result",
"=",
"self",
".",
"scheduler",
".",
"update_schedule",
"(",
"str",
"(",
"schedule",
".",
"scheduler_schedule_id",
")",
",",
"schedule",
".",
"scheduler_format",
")",
"log",
".",
"info",
"(",
"\"Updated schedule %s on scheduler for schedule %s\"",
",",
"schedule",
".",
"scheduler_schedule_id",
",",
"schedule",
".",
"id",
",",
")"
] | Synchronises the schedule specified by the ID `schedule_id` to the
scheduler service.
Arguments:
schedule_id {str} -- The ID of the schedule to sync | [
"Synchronises",
"the",
"schedule",
"specified",
"by",
"the",
"ID",
"schedule_id",
"to",
"the",
"scheduler",
"service",
"."
] | train | https://github.com/praekeltfoundation/seed-stage-based-messaging/blob/6f0cacf0727ac2ed19877de214d58009c685b8fa/contentstore/tasks.py#L106-L144 |
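The disconnect/save/connect sequence in the create branch mutes the post_save handler for exactly one save, so persisting the new scheduler_schedule_id does not re-enter the sync task. A toy signal makes the pattern concrete; the try/finally shown here is a defensive variation, not part of the original, that guarantees the handler is reconnected even if the save raises:

class Signal:
    def __init__(self):
        self._receivers = []
    def connect(self, receiver):
        self._receivers.append(receiver)
    def disconnect(self, receiver):
        self._receivers.remove(receiver)
    def send(self, instance):
        for receiver in list(self._receivers):
            receiver(instance)

post_save = Signal()

def schedule_saved(instance):
    print("handler: would enqueue sync for", instance)

post_save.connect(schedule_saved)

def save_without_resync(instance):
    # Mute the handler for this one save; try/finally guarantees reconnection.
    post_save.disconnect(schedule_saved)
    try:
        post_save.send(instance)  # stands in for schedule.save(...)
    finally:
        post_save.connect(schedule_saved)

save_without_resync("schedule-1")  # handler stays silent
post_save.send("schedule-2")       # handler fires again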
praekeltfoundation/seed-stage-based-messaging | contentstore/tasks.py | DeactivateSchedule.run | def run(self, scheduler_schedule_id, **kwargs):
"""
Deactivates the schedule specified by the ID `scheduler_schedule_id` in
the scheduler service.
Arguments:
scheduler_schedule_id {str} -- The ID of the schedule to deactivate
"""
log = self.get_logger(**kwargs)
self.scheduler.update_schedule(scheduler_schedule_id, {"active": False})
log.info(
"Deactivated schedule %s in the scheduler service", scheduler_schedule_id
) | python | def run(self, scheduler_schedule_id, **kwargs):
"""
Deactivates the schedule specified by the ID `scheduler_schedule_id` in
the scheduler service.
Arguments:
scheduler_schedule_id {str} -- The ID of the schedule to deactivate
"""
log = self.get_logger(**kwargs)
self.scheduler.update_schedule(scheduler_schedule_id, {"active": False})
log.info(
"Deactivated schedule %s in the scheduler service", scheduler_schedule_id
) | [
"def",
"run",
"(",
"self",
",",
"scheduler_schedule_id",
",",
"*",
"*",
"kwargs",
")",
":",
"log",
"=",
"self",
".",
"get_logger",
"(",
"*",
"*",
"kwargs",
")",
"self",
".",
"scheduler",
".",
"update_schedule",
"(",
"scheduler_schedule_id",
",",
"{",
"\"active\"",
":",
"False",
"}",
")",
"log",
".",
"info",
"(",
"\"Deactivated schedule %s in the scheduler service\"",
",",
"scheduler_schedule_id",
")"
] | Deactivates the schedule specified by the ID `scheduler_schedule_id` in
the scheduler service.
Arguments:
scheduler_schedule_id {str} -- The ID of the schedule to deactivate | [
"Deactivates",
"the",
"schedule",
"specified",
"by",
"the",
"ID",
"scheduler_schedule_id",
"in",
"the",
"scheduler",
"service",
"."
] | train | https://github.com/praekeltfoundation/seed-stage-based-messaging/blob/6f0cacf0727ac2ed19877de214d58009c685b8fa/contentstore/tasks.py#L158-L171 |
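DeactivateSchedule issues a partial update rather than a delete, so the scheduler service keeps the record and its ID mapping; only the active flag flips. A hypothetical stub client showing the call shape (the payload key comes from the code above, the client itself is invented):

class StubSchedulerClient:
    # Hypothetical stand-in for self.scheduler; only the call shape matters.
    def update_schedule(self, schedule_id, payload):
        print("PATCH schedule %s with %r" % (schedule_id, payload))
        return dict(payload, id=schedule_id)

StubSchedulerClient().update_schedule("abc-123", {"active": False})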
bitlabstudio/django-multilingual-tags | multilingual_tags/models.py | TagManager.get_for_model | def get_for_model(self, obj):
"""Returns the tags for a specific model/content type."""
qs = Tag.objects.language(get_language())
qs = qs.filter(
tagged_items__content_type=ctype_models.ContentType.objects.get_for_model(obj)) # NOQA
return qs.distinct() | python | def get_for_model(self, obj):
"""Returns the tags for a specific model/content type."""
qs = Tag.objects.language(get_language())
qs = qs.filter(
tagged_items__content_type=ctype_models.ContentType.objects.get_for_model(obj)) # NOQA
return qs.distinct() | [
"def",
"get_for_model",
"(",
"self",
",",
"obj",
")",
":",
"qs",
"=",
"Tag",
".",
"objects",
".",
"language",
"(",
"get_language",
"(",
")",
")",
"qs",
"=",
"qs",
".",
"filter",
"(",
"tagged_items__content_type",
"=",
"ctype_models",
".",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"obj",
")",
")",
"# NOQA",
"return",
"qs",
".",
"distinct",
"(",
")"
] | Returns the tags for a specific model/content type. | [
"Returns",
"the",
"tags",
"for",
"a",
"specific",
"model",
"/",
"content",
"type",
"."
] | train | https://github.com/bitlabstudio/django-multilingual-tags/blob/c3040d8c6275b1617b99023ce3388365190cfcbd/multilingual_tags/models.py#L12-L17 |
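Stripped of the ORM, get_for_model filters tags by the content type of the given model and de-duplicates. A toy analogue with hypothetical model names:

from collections import namedtuple

TaggedItem = namedtuple("TaggedItem", "tag content_type")
items = [
    TaggedItem("urgent", "blogpost"),
    TaggedItem("travel", "blogpost"),
    TaggedItem("urgent", "comment"),
]

def get_for_model(content_type):
    # Analogue of filtering on tagged_items__content_type and calling distinct().
    return sorted({i.tag for i in items if i.content_type == content_type})

print(get_for_model("blogpost"))  # ['travel', 'urgent']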
bitlabstudio/django-multilingual-tags | multilingual_tags/models.py | TagManager.get_for_queryset | def get_for_queryset(self, obj_queryset):
"""Returns all tags for a whole queryset of objects."""
qs = Tag.objects.language(get_language())
if obj_queryset.count() == 0:
return qs.none()
qs = qs.filter(
tagged_items__object_id__in=[
obj.id for obj in obj_queryset],
tagged_items__content_type=ctype_models.ContentType.objects.get_for_model(obj_queryset[0])) # NOQA
return qs.distinct() | python | def get_for_queryset(self, obj_queryset):
"""Returns all tags for a whole queryset of objects."""
qs = Tag.objects.language(get_language())
if obj_queryset.count() == 0:
return qs.none()
qs = qs.filter(
tagged_items__object_id__in=[
obj.id for obj in obj_queryset],
tagged_items__content_type=ctype_models.ContentType.objects.get_for_model(obj_queryset[0])) # NOQA
return qs.distinct() | [
"def",
"get_for_queryset",
"(",
"self",
",",
"obj_queryset",
")",
":",
"qs",
"=",
"Tag",
".",
"objects",
".",
"language",
"(",
"get_language",
"(",
")",
")",
"if",
"obj_queryset",
".",
"count",
"(",
")",
"==",
"0",
":",
"return",
"qs",
".",
"none",
"(",
")",
"qs",
"=",
"qs",
".",
"filter",
"(",
"tagged_items__object_id__in",
"=",
"[",
"obj",
".",
"id",
"for",
"obj",
"in",
"obj_queryset",
"]",
",",
"tagged_items__content_type",
"=",
"ctype_models",
".",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"obj_queryset",
"[",
"0",
"]",
")",
")",
"# NOQA",
"return",
"qs",
".",
"distinct",
"(",
")"
] | Returns all tags for a whole queryset of objects. | [
"Returns",
"all",
"tags",
"for",
"a",
"whole",
"queryset",
"of",
"objects",
"."
] | train | https://github.com/bitlabstudio/django-multilingual-tags/blob/c3040d8c6275b1617b99023ce3388365190cfcbd/multilingual_tags/models.py#L27-L36 |
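get_for_queryset does the same per-type filtering but also restricts to the ids of the given objects, taking the content type from the queryset's first element (all rows of a queryset share a model). A toy analogue follows; in ORM terms the id list could equally come from obj_queryset.values_list("id", flat=True), which avoids instantiating model objects:

def get_for_queryset(tagged_items, objects):
    # Analogue of the ORM query: collect the ids up front, then match both
    # the id list and the shared content type of the queryset's first object.
    if not objects:
        return set()
    ids = {obj["id"] for obj in objects}
    ctype = objects[0]["type"]
    return {ti["tag"] for ti in tagged_items
            if ti["object_id"] in ids and ti["content_type"] == ctype}

items = [
    {"tag": "urgent", "object_id": 1, "content_type": "blogpost"},
    {"tag": "travel", "object_id": 2, "content_type": "blogpost"},
    {"tag": "urgent", "object_id": 1, "content_type": "comment"},
]
print(sorted(get_for_queryset(items, [{"id": 1, "type": "blogpost"}])))  # ['urgent']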
praekeltfoundation/seed-stage-based-messaging | subscriptions/tasks.py | post_send_process | def post_send_process(context):
"""
Task to ensure subscription is bumped or converted
"""
if "error" in context:
return context
[deserialized_subscription] = serializers.deserialize(
"json", context["subscription"]
)
subscription = deserialized_subscription.object
[messageset] = serializers.deserialize("json", context["messageset"])
messageset = messageset.object
# Get set max
set_max = messageset.messages.filter(lang=subscription.lang).count()
logger.debug("set_max calculated - %s" % set_max)
# Compare user position to max
if subscription.next_sequence_number == set_max:
with transaction.atomic():
# Mark current as completed
logger.debug("marking current subscription as complete")
subscription.completed = True
subscription.active = False
subscription.process_status = 2 # Completed
deserialized_subscription.save(
update_fields=("completed", "active", "process_status")
)
# If next set defined create new subscription
if messageset.next_set:
logger.info("Creating new subscription for next set")
newsub = Subscription.objects.create(
identity=subscription.identity,
lang=subscription.lang,
messageset=messageset.next_set,
schedule=messageset.next_set.default_schedule,
)
logger.debug("Created Subscription <%s>" % newsub.id)
else:
# More in this set so increment by one
logger.debug("incrementing next_sequence_number")
subscription.next_sequence_number = F("next_sequence_number") + 1
logger.debug("setting process status back to 0")
subscription.process_status = 0
logger.debug("saving subscription")
deserialized_subscription.save(
update_fields=("next_sequence_number", "process_status")
)
# return response
return "Subscription for %s updated" % str(subscription.id) | python | def post_send_process(context):
"""
Task to ensure subscription is bumped or converted
"""
if "error" in context:
return context
[deserialized_subscription] = serializers.deserialize(
"json", context["subscription"]
)
subscription = deserialized_subscription.object
[messageset] = serializers.deserialize("json", context["messageset"])
messageset = messageset.object
# Get set max
set_max = messageset.messages.filter(lang=subscription.lang).count()
logger.debug("set_max calculated - %s" % set_max)
# Compare user position to max
if subscription.next_sequence_number == set_max:
with transaction.atomic():
# Mark current as completed
logger.debug("marking current subscription as complete")
subscription.completed = True
subscription.active = False
subscription.process_status = 2 # Completed
deserialized_subscription.save(
update_fields=("completed", "active", "process_status")
)
# If next set defined create new subscription
if messageset.next_set:
logger.info("Creating new subscription for next set")
newsub = Subscription.objects.create(
identity=subscription.identity,
lang=subscription.lang,
messageset=messageset.next_set,
schedule=messageset.next_set.default_schedule,
)
logger.debug("Created Subscription <%s>" % newsub.id)
else:
# More in this set so increment by one
logger.debug("incrementing next_sequence_number")
subscription.next_sequence_number = F("next_sequence_number") + 1
logger.debug("setting process status back to 0")
subscription.process_status = 0
logger.debug("saving subscription")
deserialized_subscription.save(
update_fields=("next_sequence_number", "process_status")
)
# return response
return "Subscription for %s updated" % str(subscription.id) | [
"def",
"post_send_process",
"(",
"context",
")",
":",
"if",
"\"error\"",
"in",
"context",
":",
"return",
"context",
"[",
"deserialized_subscription",
"]",
"=",
"serializers",
".",
"deserialize",
"(",
"\"json\"",
",",
"context",
"[",
"\"subscription\"",
"]",
")",
"subscription",
"=",
"deserialized_subscription",
".",
"object",
"[",
"messageset",
"]",
"=",
"serializers",
".",
"deserialize",
"(",
"\"json\"",
",",
"context",
"[",
"\"messageset\"",
"]",
")",
"messageset",
"=",
"messageset",
".",
"object",
"# Get set max",
"set_max",
"=",
"messageset",
".",
"messages",
".",
"filter",
"(",
"lang",
"=",
"subscription",
".",
"lang",
")",
".",
"count",
"(",
")",
"logger",
".",
"debug",
"(",
"\"set_max calculated - %s\"",
"%",
"set_max",
")",
"# Compare user position to max",
"if",
"subscription",
".",
"next_sequence_number",
"==",
"set_max",
":",
"with",
"transaction",
".",
"atomic",
"(",
")",
":",
"# Mark current as completed",
"logger",
".",
"debug",
"(",
"\"marking current subscription as complete\"",
")",
"subscription",
".",
"completed",
"=",
"True",
"subscription",
".",
"active",
"=",
"False",
"subscription",
".",
"process_status",
"=",
"2",
"# Completed",
"deserialized_subscription",
".",
"save",
"(",
"update_fields",
"=",
"(",
"\"completed\"",
",",
"\"active\"",
",",
"\"process_status\"",
")",
")",
"# If next set defined create new subscription",
"if",
"messageset",
".",
"next_set",
":",
"logger",
".",
"info",
"(",
"\"Creating new subscription for next set\"",
")",
"newsub",
"=",
"Subscription",
".",
"objects",
".",
"create",
"(",
"identity",
"=",
"subscription",
".",
"identity",
",",
"lang",
"=",
"subscription",
".",
"lang",
",",
"messageset",
"=",
"messageset",
".",
"next_set",
",",
"schedule",
"=",
"messageset",
".",
"next_set",
".",
"default_schedule",
",",
")",
"logger",
".",
"debug",
"(",
"\"Created Subscription <%s>\"",
"%",
"newsub",
".",
"id",
")",
"else",
":",
"# More in this set so increment by one",
"logger",
".",
"debug",
"(",
"\"incrementing next_sequence_number\"",
")",
"subscription",
".",
"next_sequence_number",
"=",
"F",
"(",
"\"next_sequence_number\"",
")",
"+",
"1",
"logger",
".",
"debug",
"(",
"\"setting process status back to 0\"",
")",
"subscription",
".",
"process_status",
"=",
"0",
"logger",
".",
"debug",
"(",
"\"saving subscription\"",
")",
"deserialized_subscription",
".",
"save",
"(",
"update_fields",
"=",
"(",
"\"next_sequence_number\"",
",",
"\"process_status\"",
")",
")",
"# return response",
"return",
"\"Subscription for %s updated\"",
"%",
"str",
"(",
"subscription",
".",
"id",
")"
] | Task to ensure subscription is bumped or converted | [
"Task",
"to",
"ensure",
"subscription",
"is",
"bumped",
"or",
"converted"
] | train | https://github.com/praekeltfoundation/seed-stage-based-messaging/blob/6f0cacf0727ac2ed19877de214d58009c685b8fa/subscriptions/tasks.py#L337-L387 |
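Two details in post_send_process are worth spelling out: F("next_sequence_number") + 1 pushes the increment into the UPDATE statement itself, so concurrent sends cannot lose an increment, and the comparison against set_max decides between completing the set (optionally chaining to messageset.next_set) and advancing. A framework-free sketch of the branch logic, with the model reduced to a dict:

def advance(sub, set_max, next_set_exists):
    # Mirrors the branch in post_send_process: complete-and-chain, or increment.
    if sub["next_sequence_number"] == set_max:
        sub.update(completed=True, active=False, process_status=2)
        return "create next-set subscription" if next_set_exists else "done"
    sub["next_sequence_number"] += 1  # in the ORM: F("next_sequence_number") + 1
    sub["process_status"] = 0
    return "advanced to %d" % sub["next_sequence_number"]

sub = {"next_sequence_number": 3, "process_status": 1, "completed": False, "active": True}
print(advance(sub, set_max=4, next_set_exists=True))  # advanced to 4
print(advance(sub, set_max=4, next_set_exists=True))  # create next-set subscription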