Column summary (from the dataset viewer): repository_name: string (7-55 chars); func_path_in_repository: string (4-223); func_name: string (1-134); whole_func_string: string (75-104k); language: string (1 distinct value); func_code_string: string (75-104k); func_code_tokens: sequence (19-28.4k tokens); func_documentation_string: string (1-46.9k); func_documentation_tokens: sequence (1-1.97k tokens); split_name: string (1 distinct value); func_code_url: string (87-315).

repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_string | func_code_tokens | func_documentation_string | func_documentation_tokens | split_name | func_code_url |
---|---|---|---|---|---|---|---|---|---|---|
beregond/jsonmodels | jsonmodels/fields.py | BoolField.parse_value | def parse_value(self, value):
"""Cast value to `bool`."""
parsed = super(BoolField, self).parse_value(value)
return bool(parsed) if parsed is not None else None | python | def parse_value(self, value):
"""Cast value to `bool`."""
parsed = super(BoolField, self).parse_value(value)
return bool(parsed) if parsed is not None else None | [
"def",
"parse_value",
"(",
"self",
",",
"value",
")",
":",
"parsed",
"=",
"super",
"(",
"BoolField",
",",
"self",
")",
".",
"parse_value",
"(",
"value",
")",
"return",
"bool",
"(",
"parsed",
")",
"if",
"parsed",
"is",
"not",
"None",
"else",
"None"
] | Cast value to `bool`. | [
"Cast",
"value",
"to",
"bool",
"."
] | train | https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/fields.py#L178-L181 |
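
An illustrative sketch (not taken from the dataset) of the `BoolField.parse_value` behaviour shown in the row above, assuming the `jsonmodels` package is installed and that the base class's `parse_value` returns its input unchanged:

```python
from jsonmodels import fields

field = fields.BoolField()

print(field.parse_value(1))     # True: any non-None input is passed through bool()
print(field.parse_value(""))    # False
print(field.parse_value(None))  # None is passed through unchanged
```
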
beregond/jsonmodels | jsonmodels/fields.py | ListField.parse_value | def parse_value(self, values):
"""Cast value to proper collection."""
result = self.get_default_value()
if not values:
return result
if not isinstance(values, list):
return values
return [self._cast_value(value) for value in values] | python | def parse_value(self, values):
"""Cast value to proper collection."""
result = self.get_default_value()
if not values:
return result
if not isinstance(values, list):
return values
return [self._cast_value(value) for value in values] | [
"def",
"parse_value",
"(",
"self",
",",
"values",
")",
":",
"result",
"=",
"self",
".",
"get_default_value",
"(",
")",
"if",
"not",
"values",
":",
"return",
"result",
"if",
"not",
"isinstance",
"(",
"values",
",",
"list",
")",
":",
"return",
"values",
"return",
"[",
"self",
".",
"_cast_value",
"(",
"value",
")",
"for",
"value",
"in",
"values",
"]"
] | Cast value to proper collection. | [
"Cast",
"value",
"to",
"proper",
"collection",
"."
] | train | https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/fields.py#L245-L255 |
beregond/jsonmodels | jsonmodels/fields.py | EmbeddedField.parse_value | def parse_value(self, value):
"""Parse value to proper model type."""
if not isinstance(value, dict):
return value
embed_type = self._get_embed_type()
return embed_type(**value) | python | def parse_value(self, value):
"""Parse value to proper model type."""
if not isinstance(value, dict):
return value
embed_type = self._get_embed_type()
return embed_type(**value) | [
"def",
"parse_value",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"return",
"value",
"embed_type",
"=",
"self",
".",
"_get_embed_type",
"(",
")",
"return",
"embed_type",
"(",
"*",
"*",
"value",
")"
] | Parse value to proper model type. | [
"Parse",
"value",
"to",
"proper",
"model",
"type",
"."
] | train | https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/fields.py#L329-L335 |
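
A hedged usage sketch for the `ListField` and `EmbeddedField` entries above. It assumes the documented `jsonmodels` public API (`models.Base`, constructor keyword arguments routed through each field's `parse_value`); the model and field names are made up for illustration:

```python
from jsonmodels import models, fields

class Viewer(models.Base):
    name = fields.StringField()

class Cart(models.Base):
    owner = fields.EmbeddedField(Viewer)   # dicts get parsed into Viewer instances
    tags = fields.ListField([str])         # list items are cast one by one

cart = Cart(owner={'name': 'Alice'}, tags=['new', 'paid'])
print(type(cart.owner).__name__)   # Viewer
print(cart.owner.name)             # Alice
print(cart.tags)                   # ['new', 'paid']
```
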
beregond/jsonmodels | jsonmodels/fields.py | TimeField.to_struct | def to_struct(self, value):
"""Cast `time` object to string."""
if self.str_format:
return value.strftime(self.str_format)
return value.isoformat() | python | def to_struct(self, value):
"""Cast `time` object to string."""
if self.str_format:
return value.strftime(self.str_format)
return value.isoformat() | [
"def",
"to_struct",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"str_format",
":",
"return",
"value",
".",
"strftime",
"(",
"self",
".",
"str_format",
")",
"return",
"value",
".",
"isoformat",
"(",
")"
] | Cast `time` object to string. | [
"Cast",
"time",
"object",
"to",
"string",
"."
] | train | https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/fields.py#L412-L416 |
beregond/jsonmodels | jsonmodels/fields.py | TimeField.parse_value | def parse_value(self, value):
"""Parse string into instance of `time`."""
if value is None:
return value
if isinstance(value, datetime.time):
return value
return parse(value).timetz() | python | def parse_value(self, value):
"""Parse string into instance of `time`."""
if value is None:
return value
if isinstance(value, datetime.time):
return value
return parse(value).timetz() | [
"def",
"parse_value",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"value",
"if",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"time",
")",
":",
"return",
"value",
"return",
"parse",
"(",
"value",
")",
".",
"timetz",
"(",
")"
] | Parse string into instance of `time`. | [
"Parse",
"string",
"into",
"instance",
"of",
"time",
"."
] | train | https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/fields.py#L418-L424 |
beregond/jsonmodels | jsonmodels/fields.py | DateField.to_struct | def to_struct(self, value):
"""Cast `date` object to string."""
if self.str_format:
return value.strftime(self.str_format)
return value.strftime(self.default_format) | python | def to_struct(self, value):
"""Cast `date` object to string."""
if self.str_format:
return value.strftime(self.str_format)
return value.strftime(self.default_format) | [
"def",
"to_struct",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"str_format",
":",
"return",
"value",
".",
"strftime",
"(",
"self",
".",
"str_format",
")",
"return",
"value",
".",
"strftime",
"(",
"self",
".",
"default_format",
")"
] | Cast `date` object to string. | [
"Cast",
"date",
"object",
"to",
"string",
"."
] | train | https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/fields.py#L444-L448 |
beregond/jsonmodels | jsonmodels/fields.py | DateTimeField.parse_value | def parse_value(self, value):
"""Parse string into instance of `datetime`."""
if isinstance(value, datetime.datetime):
return value
if value:
return parse(value)
else:
return None | python | def parse_value(self, value):
"""Parse string into instance of `datetime`."""
if isinstance(value, datetime.datetime):
return value
if value:
return parse(value)
else:
return None | [
"def",
"parse_value",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"datetime",
")",
":",
"return",
"value",
"if",
"value",
":",
"return",
"parse",
"(",
"value",
")",
"else",
":",
"return",
"None"
] | Parse string into instance of `datetime`. | [
"Parse",
"string",
"into",
"instance",
"of",
"datetime",
"."
] | train | https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/fields.py#L481-L488 |
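
The time and datetime fields above delegate string parsing to dateutil's `parse` helper. A small sketch (assuming `jsonmodels` and its `python-dateutil` dependency are installed), calling `parse_value` directly on field instances:

```python
import datetime
from jsonmodels import fields

dt_field = fields.DateTimeField()
dt = dt_field.parse_value('2019-05-01T12:30:00')
print(isinstance(dt, datetime.datetime), dt)   # True 2019-05-01 12:30:00

t_field = fields.TimeField()
print(t_field.parse_value('12:30:00'))         # 12:30:00

print(dt_field.parse_value(None))              # None: falsy values fall through
```
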
beregond/jsonmodels | jsonmodels/validators.py | Min.validate | def validate(self, value):
"""Validate value."""
if self.exclusive:
if value <= self.minimum_value:
tpl = "'{value}' is lower or equal than minimum ('{min}')."
raise ValidationError(
tpl.format(value=value, min=self.minimum_value))
else:
if value < self.minimum_value:
raise ValidationError(
"'{value}' is lower than minimum ('{min}').".format(
value=value, min=self.minimum_value)) | python | def validate(self, value):
"""Validate value."""
if self.exclusive:
if value <= self.minimum_value:
tpl = "'{value}' is lower or equal than minimum ('{min}')."
raise ValidationError(
tpl.format(value=value, min=self.minimum_value))
else:
if value < self.minimum_value:
raise ValidationError(
"'{value}' is lower than minimum ('{min}').".format(
value=value, min=self.minimum_value)) | [
"def",
"validate",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"exclusive",
":",
"if",
"value",
"<=",
"self",
".",
"minimum_value",
":",
"tpl",
"=",
"\"'{value}' is lower or equal than minimum ('{min}').\"",
"raise",
"ValidationError",
"(",
"tpl",
".",
"format",
"(",
"value",
"=",
"value",
",",
"min",
"=",
"self",
".",
"minimum_value",
")",
")",
"else",
":",
"if",
"value",
"<",
"self",
".",
"minimum_value",
":",
"raise",
"ValidationError",
"(",
"\"'{value}' is lower than minimum ('{min}').\"",
".",
"format",
"(",
"value",
"=",
"value",
",",
"min",
"=",
"self",
".",
"minimum_value",
")",
")"
] | Validate value. | [
"Validate",
"value",
"."
] | train | https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/validators.py#L25-L36 |
beregond/jsonmodels | jsonmodels/validators.py | Min.modify_schema | def modify_schema(self, field_schema):
"""Modify field schema."""
field_schema['minimum'] = self.minimum_value
if self.exclusive:
field_schema['exclusiveMinimum'] = True | python | def modify_schema(self, field_schema):
"""Modify field schema."""
field_schema['minimum'] = self.minimum_value
if self.exclusive:
field_schema['exclusiveMinimum'] = True | [
"def",
"modify_schema",
"(",
"self",
",",
"field_schema",
")",
":",
"field_schema",
"[",
"'minimum'",
"]",
"=",
"self",
".",
"minimum_value",
"if",
"self",
".",
"exclusive",
":",
"field_schema",
"[",
"'exclusiveMinimum'",
"]",
"=",
"True"
] | Modify field schema. | [
"Modify",
"field",
"schema",
"."
] | train | https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/validators.py#L38-L42 |
beregond/jsonmodels | jsonmodels/validators.py | Max.validate | def validate(self, value):
"""Validate value."""
if self.exclusive:
if value >= self.maximum_value:
tpl = "'{val}' is bigger or equal than maximum ('{max}')."
raise ValidationError(
tpl.format(val=value, max=self.maximum_value))
else:
if value > self.maximum_value:
raise ValidationError(
"'{value}' is bigger than maximum ('{max}').".format(
value=value, max=self.maximum_value)) | python | def validate(self, value):
"""Validate value."""
if self.exclusive:
if value >= self.maximum_value:
tpl = "'{val}' is bigger or equal than maximum ('{max}')."
raise ValidationError(
tpl.format(val=value, max=self.maximum_value))
else:
if value > self.maximum_value:
raise ValidationError(
"'{value}' is bigger than maximum ('{max}').".format(
value=value, max=self.maximum_value)) | [
"def",
"validate",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"exclusive",
":",
"if",
"value",
">=",
"self",
".",
"maximum_value",
":",
"tpl",
"=",
"\"'{val}' is bigger or equal than maximum ('{max}').\"",
"raise",
"ValidationError",
"(",
"tpl",
".",
"format",
"(",
"val",
"=",
"value",
",",
"max",
"=",
"self",
".",
"maximum_value",
")",
")",
"else",
":",
"if",
"value",
">",
"self",
".",
"maximum_value",
":",
"raise",
"ValidationError",
"(",
"\"'{value}' is bigger than maximum ('{max}').\"",
".",
"format",
"(",
"value",
"=",
"value",
",",
"max",
"=",
"self",
".",
"maximum_value",
")",
")"
] | Validate value. | [
"Validate",
"value",
"."
] | train | https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/validators.py#L60-L71 |
beregond/jsonmodels | jsonmodels/validators.py | Max.modify_schema | def modify_schema(self, field_schema):
"""Modify field schema."""
field_schema['maximum'] = self.maximum_value
if self.exclusive:
field_schema['exclusiveMaximum'] = True | python | def modify_schema(self, field_schema):
"""Modify field schema."""
field_schema['maximum'] = self.maximum_value
if self.exclusive:
field_schema['exclusiveMaximum'] = True | [
"def",
"modify_schema",
"(",
"self",
",",
"field_schema",
")",
":",
"field_schema",
"[",
"'maximum'",
"]",
"=",
"self",
".",
"maximum_value",
"if",
"self",
".",
"exclusive",
":",
"field_schema",
"[",
"'exclusiveMaximum'",
"]",
"=",
"True"
] | Modify field schema. | [
"Modify",
"field",
"schema",
"."
] | train | https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/validators.py#L73-L77 |
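
A hedged sketch of attaching the `Min`/`Max` validators from the rows above to a field. It assumes validators are passed through the `validators` keyword and that `ValidationError` lives in `jsonmodels.errors`, as in the library's documentation; the model is hypothetical:

```python
from jsonmodels import models, fields, validators
from jsonmodels.errors import ValidationError

class Measurement(models.Base):
    percent = fields.FloatField(
        validators=[validators.Min(0), validators.Max(100)])

m = Measurement(percent=50.0)
m.validate()                      # in range: no exception

try:
    m.percent = 150.0             # outside the Max bound
    m.validate()
except ValidationError as error:
    print(error)                  # reports that 150.0 exceeds the maximum
```
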
beregond/jsonmodels | jsonmodels/validators.py | Regex.validate | def validate(self, value):
"""Validate value."""
flags = self._calculate_flags()
try:
result = re.search(self.pattern, value, flags)
except TypeError as te:
raise ValidationError(*te.args)
if not result:
raise ValidationError(
'Value "{value}" did not match pattern "{pattern}".'.format(
value=value, pattern=self.pattern
)) | python | def validate(self, value):
"""Validate value."""
flags = self._calculate_flags()
try:
result = re.search(self.pattern, value, flags)
except TypeError as te:
raise ValidationError(*te.args)
if not result:
raise ValidationError(
'Value "{value}" did not match pattern "{pattern}".'.format(
value=value, pattern=self.pattern
)) | [
"def",
"validate",
"(",
"self",
",",
"value",
")",
":",
"flags",
"=",
"self",
".",
"_calculate_flags",
"(",
")",
"try",
":",
"result",
"=",
"re",
".",
"search",
"(",
"self",
".",
"pattern",
",",
"value",
",",
"flags",
")",
"except",
"TypeError",
"as",
"te",
":",
"raise",
"ValidationError",
"(",
"*",
"te",
".",
"args",
")",
"if",
"not",
"result",
":",
"raise",
"ValidationError",
"(",
"'Value \"{value}\" did not match pattern \"{pattern}\".'",
".",
"format",
"(",
"value",
"=",
"value",
",",
"pattern",
"=",
"self",
".",
"pattern",
")",
")"
] | Validate value. | [
"Validate",
"value",
"."
] | train | https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/validators.py#L112-L125 |
beregond/jsonmodels | jsonmodels/validators.py | Regex.modify_schema | def modify_schema(self, field_schema):
"""Modify field schema."""
field_schema['pattern'] = utilities.convert_python_regex_to_ecma(
self.pattern, self.flags) | python | def modify_schema(self, field_schema):
"""Modify field schema."""
field_schema['pattern'] = utilities.convert_python_regex_to_ecma(
self.pattern, self.flags) | [
"def",
"modify_schema",
"(",
"self",
",",
"field_schema",
")",
":",
"field_schema",
"[",
"'pattern'",
"]",
"=",
"utilities",
".",
"convert_python_regex_to_ecma",
"(",
"self",
".",
"pattern",
",",
"self",
".",
"flags",
")"
] | Modify field schema. | [
"Modify",
"field",
"schema",
"."
] | train | https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/validators.py#L130-L133 |
beregond/jsonmodels | jsonmodels/validators.py | Length.validate | def validate(self, value):
"""Validate value."""
len_ = len(value)
if self.minimum_value is not None and len_ < self.minimum_value:
tpl = "Value '{val}' length is lower than allowed minimum '{min}'."
raise ValidationError(tpl.format(
val=value, min=self.minimum_value
))
if self.maximum_value is not None and len_ > self.maximum_value:
raise ValidationError(
"Value '{val}' length is bigger than "
"allowed maximum '{max}'.".format(
val=value,
max=self.maximum_value,
)) | python | def validate(self, value):
"""Validate value."""
len_ = len(value)
if self.minimum_value is not None and len_ < self.minimum_value:
tpl = "Value '{val}' length is lower than allowed minimum '{min}'."
raise ValidationError(tpl.format(
val=value, min=self.minimum_value
))
if self.maximum_value is not None and len_ > self.maximum_value:
raise ValidationError(
"Value '{val}' length is bigger than "
"allowed maximum '{max}'.".format(
val=value,
max=self.maximum_value,
)) | [
"def",
"validate",
"(",
"self",
",",
"value",
")",
":",
"len_",
"=",
"len",
"(",
"value",
")",
"if",
"self",
".",
"minimum_value",
"is",
"not",
"None",
"and",
"len_",
"<",
"self",
".",
"minimum_value",
":",
"tpl",
"=",
"\"Value '{val}' length is lower than allowed minimum '{min}'.\"",
"raise",
"ValidationError",
"(",
"tpl",
".",
"format",
"(",
"val",
"=",
"value",
",",
"min",
"=",
"self",
".",
"minimum_value",
")",
")",
"if",
"self",
".",
"maximum_value",
"is",
"not",
"None",
"and",
"len_",
">",
"self",
".",
"maximum_value",
":",
"raise",
"ValidationError",
"(",
"\"Value '{val}' length is bigger than \"",
"\"allowed maximum '{max}'.\"",
".",
"format",
"(",
"val",
"=",
"value",
",",
"max",
"=",
"self",
".",
"maximum_value",
",",
")",
")"
] | Validate value. | [
"Validate",
"value",
"."
] | train | https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/validators.py#L157-L173 |
beregond/jsonmodels | jsonmodels/validators.py | Length.modify_schema | def modify_schema(self, field_schema):
"""Modify field schema."""
if self.minimum_value:
field_schema['minLength'] = self.minimum_value
if self.maximum_value:
field_schema['maxLength'] = self.maximum_value | python | def modify_schema(self, field_schema):
"""Modify field schema."""
if self.minimum_value:
field_schema['minLength'] = self.minimum_value
if self.maximum_value:
field_schema['maxLength'] = self.maximum_value | [
"def",
"modify_schema",
"(",
"self",
",",
"field_schema",
")",
":",
"if",
"self",
".",
"minimum_value",
":",
"field_schema",
"[",
"'minLength'",
"]",
"=",
"self",
".",
"minimum_value",
"if",
"self",
".",
"maximum_value",
":",
"field_schema",
"[",
"'maxLength'",
"]",
"=",
"self",
".",
"maximum_value"
] | Modify field schema. | [
"Modify",
"field",
"schema",
"."
] | train | https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/validators.py#L175-L181 |
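
A hedged sketch for the `Regex` and `Length` validator entries above, assuming the constructor signatures implied by their code (a pattern for `Regex`, minimum and maximum for `Length`); the model and pattern are illustrative only:

```python
from jsonmodels import models, fields, validators

class Account(models.Base):
    code = fields.StringField(
        validators=[
            validators.Regex(r'^[A-Z]{3}-\d{4}$'),   # checked with re.search
            validators.Length(8, 8),                 # minimum and maximum length
        ])

acct = Account(code='ABC-1234')
acct.validate()    # matches both validators, so no exception is raised
```
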
beregond/jsonmodels | jsonmodels/parsers.py | to_struct | def to_struct(model):
"""Cast instance of model to python structure.
:param model: Model to be casted.
:rtype: ``dict``
"""
model.validate()
resp = {}
for _, name, field in model.iterate_with_name():
value = field.__get__(model)
if value is None:
continue
value = field.to_struct(value)
resp[name] = value
return resp | python | def to_struct(model):
"""Cast instance of model to python structure.
:param model: Model to be casted.
:rtype: ``dict``
"""
model.validate()
resp = {}
for _, name, field in model.iterate_with_name():
value = field.__get__(model)
if value is None:
continue
value = field.to_struct(value)
resp[name] = value
return resp | [
"def",
"to_struct",
"(",
"model",
")",
":",
"model",
".",
"validate",
"(",
")",
"resp",
"=",
"{",
"}",
"for",
"_",
",",
"name",
",",
"field",
"in",
"model",
".",
"iterate_with_name",
"(",
")",
":",
"value",
"=",
"field",
".",
"__get__",
"(",
"model",
")",
"if",
"value",
"is",
"None",
":",
"continue",
"value",
"=",
"field",
".",
"to_struct",
"(",
"value",
")",
"resp",
"[",
"name",
"]",
"=",
"value",
"return",
"resp"
] | Cast instance of model to python structure.
:param model: Model to be casted.
:rtype: ``dict`` | [
"Cast",
"instance",
"of",
"model",
"to",
"python",
"structure",
"."
] | train | https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/parsers.py#L7-L24 |
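
A short sketch of the `to_struct` parser above: it validates the model and serialises populated fields into a plain dict, skipping fields whose value is `None`. The model below is made up; the import path follows the file path recorded in the row:

```python
from jsonmodels import models, fields
from jsonmodels.parsers import to_struct

class Person(models.Base):
    name = fields.StringField()
    age = fields.IntField()

person = Person(name='Ada')
print(to_struct(person))   # {'name': 'Ada'}: unset fields are None and are skipped
```
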
noirbizarre/django.js | djangojs/management/commands/js.py | Command.run_from_argv | def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr.
"""
parser = self.create_parser(argv[0], argv[1])
args = parser.parse_args(argv[2:])
handle_default_options(args)
try:
self.execute(args)
except Exception as e:
# self.stderr is not guaranteed to be set here
try:
fallback_stderr = OutputWrapper(sys.stderr, self.style.ERROR)
except:
fallback_stderr = self.stdout
stderr = getattr(self, 'stderr', fallback_stderr)
if args.traceback:
stderr.write(traceback.format_exc())
else:
stderr.write('%s: %s' % (e.__class__.__name__, e))
sys.exit(1) | python | def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr.
"""
parser = self.create_parser(argv[0], argv[1])
args = parser.parse_args(argv[2:])
handle_default_options(args)
try:
self.execute(args)
except Exception as e:
# self.stderr is not guaranteed to be set here
try:
fallback_stderr = OutputWrapper(sys.stderr, self.style.ERROR)
except:
fallback_stderr = self.stdout
stderr = getattr(self, 'stderr', fallback_stderr)
if args.traceback:
stderr.write(traceback.format_exc())
else:
stderr.write('%s: %s' % (e.__class__.__name__, e))
sys.exit(1) | [
"def",
"run_from_argv",
"(",
"self",
",",
"argv",
")",
":",
"parser",
"=",
"self",
".",
"create_parser",
"(",
"argv",
"[",
"0",
"]",
",",
"argv",
"[",
"1",
"]",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"argv",
"[",
"2",
":",
"]",
")",
"handle_default_options",
"(",
"args",
")",
"try",
":",
"self",
".",
"execute",
"(",
"args",
")",
"except",
"Exception",
"as",
"e",
":",
"# self.stderr is not guaranteed to be set here",
"try",
":",
"fallback_stderr",
"=",
"OutputWrapper",
"(",
"sys",
".",
"stderr",
",",
"self",
".",
"style",
".",
"ERROR",
")",
"except",
":",
"fallback_stderr",
"=",
"self",
".",
"stdout",
"stderr",
"=",
"getattr",
"(",
"self",
",",
"'stderr'",
",",
"fallback_stderr",
")",
"if",
"args",
".",
"traceback",
":",
"stderr",
".",
"write",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"else",
":",
"stderr",
".",
"write",
"(",
"'%s: %s'",
"%",
"(",
"e",
".",
"__class__",
".",
"__name__",
",",
"e",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr. | [
"Set",
"up",
"any",
"environment",
"changes",
"requested",
"(",
"e",
".",
"g",
".",
"Python",
"path",
"and",
"Django",
"settings",
")",
"then",
"run",
"this",
"command",
".",
"If",
"the",
"command",
"raises",
"a",
"CommandError",
"intercept",
"it",
"and",
"print",
"it",
"sensibly",
"to",
"stderr",
"."
] | train | https://github.com/noirbizarre/django.js/blob/65b267b04ffc0f969b9f8e2f8ce2f922397c8af1/djangojs/management/commands/js.py#L36-L59 |
noirbizarre/django.js | djangojs/management/commands/js.py | Command.create_parser | def create_parser(self, prog_name, subcommand):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
parser = argparse.ArgumentParser(prog='%s %s' % (prog_name, subcommand), description=self.help)
parser.add_argument('-v', '--verbosity', action='store', default=1, type=int, choices=range(4),
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output'),
parser.add_argument('--settings',
help='The Python path to a settings module, e.g. "myproject.settings.main". '
'If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.'),
parser.add_argument('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".'),
parser.add_argument('--traceback', action='store_true', help='Print traceback on exception'),
subparsers = parser.add_subparsers(description='JavaScript command to execute')
for subparser in self.subparsers:
subparser(self, subparsers)
return parser | python | def create_parser(self, prog_name, subcommand):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
parser = argparse.ArgumentParser(prog='%s %s' % (prog_name, subcommand), description=self.help)
parser.add_argument('-v', '--verbosity', action='store', default=1, type=int, choices=range(4),
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output'),
parser.add_argument('--settings',
help='The Python path to a settings module, e.g. "myproject.settings.main". '
'If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.'),
parser.add_argument('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".'),
parser.add_argument('--traceback', action='store_true', help='Print traceback on exception'),
subparsers = parser.add_subparsers(description='JavaScript command to execute')
for subparser in self.subparsers:
subparser(self, subparsers)
return parser | [
"def",
"create_parser",
"(",
"self",
",",
"prog_name",
",",
"subcommand",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"'%s %s'",
"%",
"(",
"prog_name",
",",
"subcommand",
")",
",",
"description",
"=",
"self",
".",
"help",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--verbosity'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"1",
",",
"type",
"=",
"int",
",",
"choices",
"=",
"range",
"(",
"4",
")",
",",
"help",
"=",
"'Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output'",
")",
",",
"parser",
".",
"add_argument",
"(",
"'--settings'",
",",
"help",
"=",
"'The Python path to a settings module, e.g. \"myproject.settings.main\". '",
"'If this isn\\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.'",
")",
",",
"parser",
".",
"add_argument",
"(",
"'--pythonpath'",
",",
"help",
"=",
"'A directory to add to the Python path, e.g. \"/home/djangoprojects/myproject\".'",
")",
",",
"parser",
".",
"add_argument",
"(",
"'--traceback'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Print traceback on exception'",
")",
",",
"subparsers",
"=",
"parser",
".",
"add_subparsers",
"(",
"description",
"=",
"'JavaScript command to execute'",
")",
"for",
"subparser",
"in",
"self",
".",
"subparsers",
":",
"subparser",
"(",
"self",
",",
"subparsers",
")",
"return",
"parser"
] | Create and return the ``OptionParser`` which will be used to
parse the arguments to this command. | [
"Create",
"and",
"return",
"the",
"OptionParser",
"which",
"will",
"be",
"used",
"to",
"parse",
"the",
"arguments",
"to",
"this",
"command",
"."
] | train | https://github.com/noirbizarre/django.js/blob/65b267b04ffc0f969b9f8e2f8ce2f922397c8af1/djangojs/management/commands/js.py#L61-L83 |
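
A generic, standard-library-only sketch of the subparser pattern used by `create_parser` above: one top-level parser with shared options, plus one sub-command per feature. The `launch` sub-command and its `--timeout` option are hypothetical, not the actual django.js commands:

```python
import argparse

parser = argparse.ArgumentParser(prog='js', description='JavaScript helper')
parser.add_argument('--traceback', action='store_true',
                    help='Print traceback on exception')

subparsers = parser.add_subparsers(dest='command',
                                   description='JavaScript command to execute')
launch = subparsers.add_parser('launch', help='Run a test suite')  # hypothetical sub-command
launch.add_argument('--timeout', type=int, default=30)

args = parser.parse_args(['launch', '--timeout', '10'])
print(args.command, args.timeout)   # launch 10
```
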
noirbizarre/django.js | setup.py | rst | def rst(filename):
'''
Load rst file and sanitize it for PyPI.
Remove unsupported github tags:
- code-block directive
- travis ci build badge
'''
content = codecs.open(filename, encoding='utf-8').read()
for regex, replacement in PYPI_RST_FILTERS:
content = re.sub(regex, replacement, content)
return content | python | def rst(filename):
'''
Load rst file and sanitize it for PyPI.
Remove unsupported github tags:
- code-block directive
- travis ci build badge
'''
content = codecs.open(filename, encoding='utf-8').read()
for regex, replacement in PYPI_RST_FILTERS:
content = re.sub(regex, replacement, content)
return content | [
"def",
"rst",
"(",
"filename",
")",
":",
"content",
"=",
"codecs",
".",
"open",
"(",
"filename",
",",
"encoding",
"=",
"'utf-8'",
")",
".",
"read",
"(",
")",
"for",
"regex",
",",
"replacement",
"in",
"PYPI_RST_FILTERS",
":",
"content",
"=",
"re",
".",
"sub",
"(",
"regex",
",",
"replacement",
",",
"content",
")",
"return",
"content"
] | Load rst file and sanitize it for PyPI.
Remove unsupported github tags:
- code-block directive
- travis ci build badge | [
"Load",
"rst",
"file",
"and",
"sanitize",
"it",
"for",
"PyPI",
".",
"Remove",
"unsupported",
"github",
"tags",
":",
"-",
"code",
"-",
"block",
"directive",
"-",
"travis",
"ci",
"build",
"badge"
] | train | https://github.com/noirbizarre/django.js/blob/65b267b04ffc0f969b9f8e2f8ce2f922397c8af1/setup.py#L22-L32 |
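
The `rst()` helper above applies a list of `(regex, replacement)` pairs to a README. `PYPI_RST_FILTERS` itself is not shown in this row, so the filters below are made-up stand-ins that only illustrate the pipeline:

```python
import re

# Made-up stand-ins for PYPI_RST_FILTERS, which this row does not show.
HYPOTHETICAL_FILTERS = (
    (r'\.\. code-block:: \w+', '::'),        # downgrade code-block directives
    (r'\.\. image:: .*travis-ci.*\n', ''),   # drop a CI badge image line
)

content = ".. code-block:: python\n\n    print('hi')\n"
for regex, replacement in HYPOTHETICAL_FILTERS:
    content = re.sub(regex, replacement, content)
print(content)   # "::" followed by the indented code line
```
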
noirbizarre/django.js | djangojs/templatetags/js.py | javascript | def javascript(filename, type='text/javascript'):
'''A simple shortcut to render a ``script`` tag to a static javascript file'''
if '?' in filename and len(filename.split('?')) is 2:
filename, params = filename.split('?')
return '<script type="%s" src="%s?%s"></script>' % (type, staticfiles_storage.url(filename), params)
else:
return '<script type="%s" src="%s"></script>' % (type, staticfiles_storage.url(filename)) | python | def javascript(filename, type='text/javascript'):
'''A simple shortcut to render a ``script`` tag to a static javascript file'''
if '?' in filename and len(filename.split('?')) is 2:
filename, params = filename.split('?')
return '<script type="%s" src="%s?%s"></script>' % (type, staticfiles_storage.url(filename), params)
else:
return '<script type="%s" src="%s"></script>' % (type, staticfiles_storage.url(filename)) | [
"def",
"javascript",
"(",
"filename",
",",
"type",
"=",
"'text/javascript'",
")",
":",
"if",
"'?'",
"in",
"filename",
"and",
"len",
"(",
"filename",
".",
"split",
"(",
"'?'",
")",
")",
"is",
"2",
":",
"filename",
",",
"params",
"=",
"filename",
".",
"split",
"(",
"'?'",
")",
"return",
"'<script type=\"%s\" src=\"%s?%s\"></script>'",
"%",
"(",
"type",
",",
"staticfiles_storage",
".",
"url",
"(",
"filename",
")",
",",
"params",
")",
"else",
":",
"return",
"'<script type=\"%s\" src=\"%s\"></script>'",
"%",
"(",
"type",
",",
"staticfiles_storage",
".",
"url",
"(",
"filename",
")",
")"
] | A simple shortcut to render a ``script`` tag to a static javascript file | [
"A",
"simple",
"shortcut",
"to",
"render",
"a",
"script",
"tag",
"to",
"a",
"static",
"javascript",
"file"
] | train | https://github.com/noirbizarre/django.js/blob/65b267b04ffc0f969b9f8e2f8ce2f922397c8af1/djangojs/templatetags/js.py#L111-L117 |
noirbizarre/django.js | djangojs/templatetags/js.py | jquery_js | def jquery_js(version=None, migrate=False):
'''A shortcut to render a ``script`` tag for the packaged jQuery'''
version = version or settings.JQUERY_VERSION
suffix = '.min' if not settings.DEBUG else ''
libs = [js_lib('jquery-%s%s.js' % (version, suffix))]
if _boolean(migrate):
libs.append(js_lib('jquery-migrate-%s%s.js' % (JQUERY_MIGRATE_VERSION, suffix)))
return '\n'.join(libs) | python | def jquery_js(version=None, migrate=False):
'''A shortcut to render a ``script`` tag for the packaged jQuery'''
version = version or settings.JQUERY_VERSION
suffix = '.min' if not settings.DEBUG else ''
libs = [js_lib('jquery-%s%s.js' % (version, suffix))]
if _boolean(migrate):
libs.append(js_lib('jquery-migrate-%s%s.js' % (JQUERY_MIGRATE_VERSION, suffix)))
return '\n'.join(libs) | [
"def",
"jquery_js",
"(",
"version",
"=",
"None",
",",
"migrate",
"=",
"False",
")",
":",
"version",
"=",
"version",
"or",
"settings",
".",
"JQUERY_VERSION",
"suffix",
"=",
"'.min'",
"if",
"not",
"settings",
".",
"DEBUG",
"else",
"''",
"libs",
"=",
"[",
"js_lib",
"(",
"'jquery-%s%s.js'",
"%",
"(",
"version",
",",
"suffix",
")",
")",
"]",
"if",
"_boolean",
"(",
"migrate",
")",
":",
"libs",
".",
"append",
"(",
"js_lib",
"(",
"'jquery-migrate-%s%s.js'",
"%",
"(",
"JQUERY_MIGRATE_VERSION",
",",
"suffix",
")",
")",
")",
"return",
"'\\n'",
".",
"join",
"(",
"libs",
")"
] | A shortcut to render a ``script`` tag for the packaged jQuery | [
"A",
"shortcut",
"to",
"render",
"a",
"script",
"tag",
"for",
"the",
"packaged",
"jQuery"
] | train | https://github.com/noirbizarre/django.js/blob/65b267b04ffc0f969b9f8e2f8ce2f922397c8af1/djangojs/templatetags/js.py#L156-L163 |
noirbizarre/django.js | djangojs/templatetags/js.py | django_js | def django_js(context, jquery=True, i18n=True, csrf=True, init=True):
'''Include Django.js javascript library in the page'''
return {
'js': {
'minified': not settings.DEBUG,
'jquery': _boolean(jquery),
'i18n': _boolean(i18n),
'csrf': _boolean(csrf),
'init': _boolean(init),
}
} | python | def django_js(context, jquery=True, i18n=True, csrf=True, init=True):
'''Include Django.js javascript library in the page'''
return {
'js': {
'minified': not settings.DEBUG,
'jquery': _boolean(jquery),
'i18n': _boolean(i18n),
'csrf': _boolean(csrf),
'init': _boolean(init),
}
} | [
"def",
"django_js",
"(",
"context",
",",
"jquery",
"=",
"True",
",",
"i18n",
"=",
"True",
",",
"csrf",
"=",
"True",
",",
"init",
"=",
"True",
")",
":",
"return",
"{",
"'js'",
":",
"{",
"'minified'",
":",
"not",
"settings",
".",
"DEBUG",
",",
"'jquery'",
":",
"_boolean",
"(",
"jquery",
")",
",",
"'i18n'",
":",
"_boolean",
"(",
"i18n",
")",
",",
"'csrf'",
":",
"_boolean",
"(",
"csrf",
")",
",",
"'init'",
":",
"_boolean",
"(",
"init",
")",
",",
"}",
"}"
] | Include Django.js javascript library in the page | [
"Include",
"Django",
".",
"js",
"javascript",
"library",
"in",
"the",
"page"
] | train | https://github.com/noirbizarre/django.js/blob/65b267b04ffc0f969b9f8e2f8ce2f922397c8af1/djangojs/templatetags/js.py#L167-L177 |
noirbizarre/django.js | djangojs/templatetags/js.py | django_js_init | def django_js_init(context, jquery=False, i18n=True, csrf=True, init=True):
'''Include Django.js javascript library initialization in the page'''
return {
'js': {
'jquery': _boolean(jquery),
'i18n': _boolean(i18n),
'csrf': _boolean(csrf),
'init': _boolean(init),
}
} | python | def django_js_init(context, jquery=False, i18n=True, csrf=True, init=True):
'''Include Django.js javascript library initialization in the page'''
return {
'js': {
'jquery': _boolean(jquery),
'i18n': _boolean(i18n),
'csrf': _boolean(csrf),
'init': _boolean(init),
}
} | [
"def",
"django_js_init",
"(",
"context",
",",
"jquery",
"=",
"False",
",",
"i18n",
"=",
"True",
",",
"csrf",
"=",
"True",
",",
"init",
"=",
"True",
")",
":",
"return",
"{",
"'js'",
":",
"{",
"'jquery'",
":",
"_boolean",
"(",
"jquery",
")",
",",
"'i18n'",
":",
"_boolean",
"(",
"i18n",
")",
",",
"'csrf'",
":",
"_boolean",
"(",
"csrf",
")",
",",
"'init'",
":",
"_boolean",
"(",
"init",
")",
",",
"}",
"}"
] | Include Django.js javascript library initialization in the page | [
"Include",
"Django",
".",
"js",
"javascript",
"library",
"initialization",
"in",
"the",
"page"
] | train | https://github.com/noirbizarre/django.js/blob/65b267b04ffc0f969b9f8e2f8ce2f922397c8af1/djangojs/templatetags/js.py#L181-L190 |
noirbizarre/django.js | djangojs/context_serializer.py | ContextSerializer.as_dict | def as_dict(self):
'''
Serialize the context as a dictionnary from a given request.
'''
data = {}
if settings.JS_CONTEXT_ENABLED:
for context in RequestContext(self.request):
for key, value in six.iteritems(context):
if settings.JS_CONTEXT and key not in settings.JS_CONTEXT:
continue
if settings.JS_CONTEXT_EXCLUDE and key in settings.JS_CONTEXT_EXCLUDE:
continue
handler_name = 'process_%s' % key
if hasattr(self, handler_name):
handler = getattr(self, handler_name)
data[key] = handler(value, data)
elif isinstance(value, SERIALIZABLE_TYPES):
data[key] = value
if settings.JS_USER_ENABLED:
self.handle_user(data)
return data | python | def as_dict(self):
'''
Serialize the context as a dictionnary from a given request.
'''
data = {}
if settings.JS_CONTEXT_ENABLED:
for context in RequestContext(self.request):
for key, value in six.iteritems(context):
if settings.JS_CONTEXT and key not in settings.JS_CONTEXT:
continue
if settings.JS_CONTEXT_EXCLUDE and key in settings.JS_CONTEXT_EXCLUDE:
continue
handler_name = 'process_%s' % key
if hasattr(self, handler_name):
handler = getattr(self, handler_name)
data[key] = handler(value, data)
elif isinstance(value, SERIALIZABLE_TYPES):
data[key] = value
if settings.JS_USER_ENABLED:
self.handle_user(data)
return data | [
"def",
"as_dict",
"(",
"self",
")",
":",
"data",
"=",
"{",
"}",
"if",
"settings",
".",
"JS_CONTEXT_ENABLED",
":",
"for",
"context",
"in",
"RequestContext",
"(",
"self",
".",
"request",
")",
":",
"for",
"key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"context",
")",
":",
"if",
"settings",
".",
"JS_CONTEXT",
"and",
"key",
"not",
"in",
"settings",
".",
"JS_CONTEXT",
":",
"continue",
"if",
"settings",
".",
"JS_CONTEXT_EXCLUDE",
"and",
"key",
"in",
"settings",
".",
"JS_CONTEXT_EXCLUDE",
":",
"continue",
"handler_name",
"=",
"'process_%s'",
"%",
"key",
"if",
"hasattr",
"(",
"self",
",",
"handler_name",
")",
":",
"handler",
"=",
"getattr",
"(",
"self",
",",
"handler_name",
")",
"data",
"[",
"key",
"]",
"=",
"handler",
"(",
"value",
",",
"data",
")",
"elif",
"isinstance",
"(",
"value",
",",
"SERIALIZABLE_TYPES",
")",
":",
"data",
"[",
"key",
"]",
"=",
"value",
"if",
"settings",
".",
"JS_USER_ENABLED",
":",
"self",
".",
"handle_user",
"(",
"data",
")",
"return",
"data"
] | Serialize the context as a dictionnary from a given request. | [
"Serialize",
"the",
"context",
"as",
"a",
"dictionnary",
"from",
"a",
"given",
"request",
"."
] | train | https://github.com/noirbizarre/django.js/blob/65b267b04ffc0f969b9f8e2f8ce2f922397c8af1/djangojs/context_serializer.py#L37-L57 |
noirbizarre/django.js | djangojs/context_serializer.py | ContextSerializer.process_LANGUAGE_CODE | def process_LANGUAGE_CODE(self, language_code, data):
'''
Fix language code when set to non included default `en`
and add the extra variables ``LANGUAGE_NAME`` and ``LANGUAGE_NAME_LOCAL``.
'''
# Dirty hack to fix non included default
language_code = 'en-us' if language_code == 'en' else language_code
language = translation.get_language_info('en' if language_code == 'en-us' else language_code)
if not settings.JS_CONTEXT or 'LANGUAGE_NAME' in settings.JS_CONTEXT \
or (settings.JS_CONTEXT_EXCLUDE and 'LANGUAGE_NAME' in settings.JS_CONTEXT_EXCLUDE):
data['LANGUAGE_NAME'] = language['name']
if not settings.JS_CONTEXT or 'LANGUAGE_NAME_LOCAL' in settings.JS_CONTEXT \
or (settings.JS_CONTEXT_EXCLUDE and 'LANGUAGE_NAME_LOCAL' in settings.JS_CONTEXT_EXCLUDE):
data['LANGUAGE_NAME_LOCAL'] = language['name_local']
return language_code | python | def process_LANGUAGE_CODE(self, language_code, data):
'''
Fix language code when set to non included default `en`
and add the extra variables ``LANGUAGE_NAME`` and ``LANGUAGE_NAME_LOCAL``.
'''
# Dirty hack to fix non included default
language_code = 'en-us' if language_code == 'en' else language_code
language = translation.get_language_info('en' if language_code == 'en-us' else language_code)
if not settings.JS_CONTEXT or 'LANGUAGE_NAME' in settings.JS_CONTEXT \
or (settings.JS_CONTEXT_EXCLUDE and 'LANGUAGE_NAME' in settings.JS_CONTEXT_EXCLUDE):
data['LANGUAGE_NAME'] = language['name']
if not settings.JS_CONTEXT or 'LANGUAGE_NAME_LOCAL' in settings.JS_CONTEXT \
or (settings.JS_CONTEXT_EXCLUDE and 'LANGUAGE_NAME_LOCAL' in settings.JS_CONTEXT_EXCLUDE):
data['LANGUAGE_NAME_LOCAL'] = language['name_local']
return language_code | [
"def",
"process_LANGUAGE_CODE",
"(",
"self",
",",
"language_code",
",",
"data",
")",
":",
"# Dirty hack to fix non included default",
"language_code",
"=",
"'en-us'",
"if",
"language_code",
"==",
"'en'",
"else",
"language_code",
"language",
"=",
"translation",
".",
"get_language_info",
"(",
"'en'",
"if",
"language_code",
"==",
"'en-us'",
"else",
"language_code",
")",
"if",
"not",
"settings",
".",
"JS_CONTEXT",
"or",
"'LANGUAGE_NAME'",
"in",
"settings",
".",
"JS_CONTEXT",
"or",
"(",
"settings",
".",
"JS_CONTEXT_EXCLUDE",
"and",
"'LANGUAGE_NAME'",
"in",
"settings",
".",
"JS_CONTEXT_EXCLUDE",
")",
":",
"data",
"[",
"'LANGUAGE_NAME'",
"]",
"=",
"language",
"[",
"'name'",
"]",
"if",
"not",
"settings",
".",
"JS_CONTEXT",
"or",
"'LANGUAGE_NAME_LOCAL'",
"in",
"settings",
".",
"JS_CONTEXT",
"or",
"(",
"settings",
".",
"JS_CONTEXT_EXCLUDE",
"and",
"'LANGUAGE_NAME_LOCAL'",
"in",
"settings",
".",
"JS_CONTEXT_EXCLUDE",
")",
":",
"data",
"[",
"'LANGUAGE_NAME_LOCAL'",
"]",
"=",
"language",
"[",
"'name_local'",
"]",
"return",
"language_code"
] | Fix language code when set to non included default `en`
and add the extra variables ``LANGUAGE_NAME`` and ``LANGUAGE_NAME_LOCAL``. | [
"Fix",
"language",
"code",
"when",
"set",
"to",
"non",
"included",
"default",
"en",
"and",
"add",
"the",
"extra",
"variables",
"LANGUAGE_NAME",
"and",
"LANGUAGE_NAME_LOCAL",
"."
] | train | https://github.com/noirbizarre/django.js/blob/65b267b04ffc0f969b9f8e2f8ce2f922397c8af1/djangojs/context_serializer.py#L69-L83 |
noirbizarre/django.js | djangojs/context_serializer.py | ContextSerializer.handle_user | def handle_user(self, data):
'''
Insert user informations in data
Override it to add extra user attributes.
'''
# Default to unauthenticated anonymous user
data['user'] = {
'username': '',
'is_authenticated': False,
'is_staff': False,
'is_superuser': False,
'permissions': tuple(),
}
if 'django.contrib.sessions.middleware.SessionMiddleware' in settings.MIDDLEWARE_CLASSES:
user = self.request.user
data['user']['is_authenticated'] = user.is_authenticated()
if hasattr(user, 'username'):
data['user']['username'] = user.username
elif hasattr(user, 'get_username'):
data['user']['username'] = user.get_username()
if hasattr(user, 'is_staff'):
data['user']['is_staff'] = user.is_staff
if hasattr(user, 'is_superuser'):
data['user']['is_superuser'] = user.is_superuser
if hasattr(user, 'get_all_permissions'):
data['user']['permissions'] = tuple(user.get_all_permissions()) | python | def handle_user(self, data):
'''
Insert user informations in data
Override it to add extra user attributes.
'''
# Default to unauthenticated anonymous user
data['user'] = {
'username': '',
'is_authenticated': False,
'is_staff': False,
'is_superuser': False,
'permissions': tuple(),
}
if 'django.contrib.sessions.middleware.SessionMiddleware' in settings.MIDDLEWARE_CLASSES:
user = self.request.user
data['user']['is_authenticated'] = user.is_authenticated()
if hasattr(user, 'username'):
data['user']['username'] = user.username
elif hasattr(user, 'get_username'):
data['user']['username'] = user.get_username()
if hasattr(user, 'is_staff'):
data['user']['is_staff'] = user.is_staff
if hasattr(user, 'is_superuser'):
data['user']['is_superuser'] = user.is_superuser
if hasattr(user, 'get_all_permissions'):
data['user']['permissions'] = tuple(user.get_all_permissions()) | [
"def",
"handle_user",
"(",
"self",
",",
"data",
")",
":",
"# Default to unauthenticated anonymous user",
"data",
"[",
"'user'",
"]",
"=",
"{",
"'username'",
":",
"''",
",",
"'is_authenticated'",
":",
"False",
",",
"'is_staff'",
":",
"False",
",",
"'is_superuser'",
":",
"False",
",",
"'permissions'",
":",
"tuple",
"(",
")",
",",
"}",
"if",
"'django.contrib.sessions.middleware.SessionMiddleware'",
"in",
"settings",
".",
"MIDDLEWARE_CLASSES",
":",
"user",
"=",
"self",
".",
"request",
".",
"user",
"data",
"[",
"'user'",
"]",
"[",
"'is_authenticated'",
"]",
"=",
"user",
".",
"is_authenticated",
"(",
")",
"if",
"hasattr",
"(",
"user",
",",
"'username'",
")",
":",
"data",
"[",
"'user'",
"]",
"[",
"'username'",
"]",
"=",
"user",
".",
"username",
"elif",
"hasattr",
"(",
"user",
",",
"'get_username'",
")",
":",
"data",
"[",
"'user'",
"]",
"[",
"'username'",
"]",
"=",
"user",
".",
"get_username",
"(",
")",
"if",
"hasattr",
"(",
"user",
",",
"'is_staff'",
")",
":",
"data",
"[",
"'user'",
"]",
"[",
"'is_staff'",
"]",
"=",
"user",
".",
"is_staff",
"if",
"hasattr",
"(",
"user",
",",
"'is_superuser'",
")",
":",
"data",
"[",
"'user'",
"]",
"[",
"'is_superuser'",
"]",
"=",
"user",
".",
"is_superuser",
"if",
"hasattr",
"(",
"user",
",",
"'get_all_permissions'",
")",
":",
"data",
"[",
"'user'",
"]",
"[",
"'permissions'",
"]",
"=",
"tuple",
"(",
"user",
".",
"get_all_permissions",
"(",
")",
")"
] | Insert user informations in data
Override it to add extra user attributes. | [
"Insert",
"user",
"informations",
"in",
"data"
] | train | https://github.com/noirbizarre/django.js/blob/65b267b04ffc0f969b9f8e2f8ce2f922397c8af1/djangojs/context_serializer.py#L85-L111 |
noirbizarre/django.js | djangojs/utils.py | class_from_string | def class_from_string(name):
'''
Get a python class object from its name
'''
module_name, class_name = name.rsplit('.', 1)
__import__(module_name)
module = sys.modules[module_name]
return getattr(module, class_name) | python | def class_from_string(name):
'''
Get a python class object from its name
'''
module_name, class_name = name.rsplit('.', 1)
__import__(module_name)
module = sys.modules[module_name]
return getattr(module, class_name) | [
"def",
"class_from_string",
"(",
"name",
")",
":",
"module_name",
",",
"class_name",
"=",
"name",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"__import__",
"(",
"module_name",
")",
"module",
"=",
"sys",
".",
"modules",
"[",
"module_name",
"]",
"return",
"getattr",
"(",
"module",
",",
"class_name",
")"
] | Get a python class object from its name | [
"Get",
"a",
"python",
"class",
"object",
"from",
"its",
"name"
] | train | https://github.com/noirbizarre/django.js/blob/65b267b04ffc0f969b9f8e2f8ce2f922397c8af1/djangojs/utils.py#L27-L34 |
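
A quick, self-contained illustration of `class_from_string` above; the helper body is copied from the row so the snippet runs without installing django.js, and it resolves a standard-library class:

```python
import sys

def class_from_string(name):
    # copied from the djangojs.utils row above
    module_name, class_name = name.rsplit('.', 1)
    __import__(module_name)
    module = sys.modules[module_name]
    return getattr(module, class_name)

cls = class_from_string('collections.OrderedDict')
print(cls)             # <class 'collections.OrderedDict'>
print(cls(a=1, b=2))   # an OrderedDict holding a=1 and b=2
```
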
noirbizarre/django.js | djangojs/utils.py | StorageGlobber.glob | def glob(cls, files=None):
'''
Glob a pattern or a list of pattern static storage relative(s).
'''
files = files or []
if isinstance(files, str):
files = os.path.normpath(files)
matches = lambda path: matches_patterns(path, [files])
return [path for path in cls.get_static_files() if matches(path)]
elif isinstance(files, (list, tuple)):
all_files = cls.get_static_files()
files = [os.path.normpath(f) for f in files]
sorted_result = []
for pattern in files:
sorted_result.extend([f for f in all_files if matches_patterns(f, [pattern])])
return sorted_result | python | def glob(cls, files=None):
'''
Glob a pattern or a list of pattern static storage relative(s).
'''
files = files or []
if isinstance(files, str):
files = os.path.normpath(files)
matches = lambda path: matches_patterns(path, [files])
return [path for path in cls.get_static_files() if matches(path)]
elif isinstance(files, (list, tuple)):
all_files = cls.get_static_files()
files = [os.path.normpath(f) for f in files]
sorted_result = []
for pattern in files:
sorted_result.extend([f for f in all_files if matches_patterns(f, [pattern])])
return sorted_result | [
"def",
"glob",
"(",
"cls",
",",
"files",
"=",
"None",
")",
":",
"files",
"=",
"files",
"or",
"[",
"]",
"if",
"isinstance",
"(",
"files",
",",
"str",
")",
":",
"files",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"files",
")",
"matches",
"=",
"lambda",
"path",
":",
"matches_patterns",
"(",
"path",
",",
"[",
"files",
"]",
")",
"return",
"[",
"path",
"for",
"path",
"in",
"cls",
".",
"get_static_files",
"(",
")",
"if",
"matches",
"(",
"path",
")",
"]",
"elif",
"isinstance",
"(",
"files",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"all_files",
"=",
"cls",
".",
"get_static_files",
"(",
")",
"files",
"=",
"[",
"os",
".",
"path",
".",
"normpath",
"(",
"f",
")",
"for",
"f",
"in",
"files",
"]",
"sorted_result",
"=",
"[",
"]",
"for",
"pattern",
"in",
"files",
":",
"sorted_result",
".",
"extend",
"(",
"[",
"f",
"for",
"f",
"in",
"all_files",
"if",
"matches_patterns",
"(",
"f",
",",
"[",
"pattern",
"]",
")",
"]",
")",
"return",
"sorted_result"
] | Glob a pattern or a list of pattern static storage relative(s). | [
"Glob",
"a",
"pattern",
"or",
"a",
"list",
"of",
"pattern",
"static",
"storage",
"relative",
"(",
"s",
")",
"."
] | train | https://github.com/noirbizarre/django.js/blob/65b267b04ffc0f969b9f8e2f8ce2f922397c8af1/djangojs/utils.py#L54-L69 |
noirbizarre/django.js | djangojs/runners.py | PhantomJsRunner.execute | def execute(self, command):
'''
Execute a subprocess yielding output lines
'''
process = Popen(command, stdout=PIPE, stderr=STDOUT, universal_newlines=True)
while True:
if process.poll() is not None:
self.returncode = process.returncode # pylint: disable=W0201
break
yield process.stdout.readline() | python | def execute(self, command):
'''
Execute a subprocess yielding output lines
'''
process = Popen(command, stdout=PIPE, stderr=STDOUT, universal_newlines=True)
while True:
if process.poll() is not None:
self.returncode = process.returncode # pylint: disable=W0201
break
yield process.stdout.readline() | [
"def",
"execute",
"(",
"self",
",",
"command",
")",
":",
"process",
"=",
"Popen",
"(",
"command",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"STDOUT",
",",
"universal_newlines",
"=",
"True",
")",
"while",
"True",
":",
"if",
"process",
".",
"poll",
"(",
")",
"is",
"not",
"None",
":",
"self",
".",
"returncode",
"=",
"process",
".",
"returncode",
"# pylint: disable=W0201",
"break",
"yield",
"process",
".",
"stdout",
".",
"readline",
"(",
")"
] | Execute a subprocess yielding output lines | [
"Execute",
"a",
"subprocess",
"yielding",
"output",
"lines"
] | train | https://github.com/noirbizarre/django.js/blob/65b267b04ffc0f969b9f8e2f8ce2f922397c8af1/djangojs/runners.py#L99-L108 |
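
A generic, standard-library sketch of the streaming-subprocess pattern used by `execute()` above. It is not the django.js code: the loop here reads a line before checking the exit status, so output produced just before the process exits is not dropped:

```python
import sys
from subprocess import Popen, PIPE, STDOUT

def stream_output(command):
    # Read a line first, then check the exit status, so trailing output is kept.
    process = Popen(command, stdout=PIPE, stderr=STDOUT, universal_newlines=True)
    while True:
        line = process.stdout.readline()
        if line:
            yield line.rstrip('\n')
        elif process.poll() is not None:
            break

for line in stream_output([sys.executable, '-c', 'print("hello"); print("world")']):
    print(line)   # hello, then world
```
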
noirbizarre/django.js | djangojs/runners.py | PhantomJsRunner.phantomjs | def phantomjs(self, *args, **kwargs):
'''
Execute PhantomJS by giving ``args`` as command line arguments.
If test are run in verbose mode (``-v/--verbosity`` = 2), it output:
- the title as header (with separators before and after)
- modules and test names
- assertions results (with ``django.utils.termcolors`` support)
In case of error, a JsTestException is raised to give details about javascript errors.
'''
separator = '=' * LINE_SIZE
title = kwargs['title'] if 'title' in kwargs else 'phantomjs output'
nb_spaces = (LINE_SIZE - len(title)) // 2
if VERBOSE:
print('')
print(separator)
print(' ' * nb_spaces + title)
print(separator)
sys.stdout.flush()
with NamedTemporaryFile(delete=True) as cookies_file:
cmd = ('phantomjs', '--cookies-file=%s' % cookies_file.name) + args
if self.timeout:
cmd += (str(self.timeout),)
parser = TapParser(debug=VERBOSITY > 2)
output = self.execute(cmd)
for item in parser.parse(output):
if VERBOSE:
print(item.display())
sys.stdout.flush()
if VERBOSE:
print(separator)
sys.stdout.flush()
failures = parser.suites.get_all_failures()
if failures:
raise JsTestException('Failed javascript assertions', failures)
if self.returncode > 0:
raise JsTestException('PhantomJS return with non-zero exit code (%s)' % self.returncode) | python | def phantomjs(self, *args, **kwargs):
'''
Execute PhantomJS by giving ``args`` as command line arguments.
If test are run in verbose mode (``-v/--verbosity`` = 2), it output:
- the title as header (with separators before and after)
- modules and test names
- assertions results (with ``django.utils.termcolors`` support)
In case of error, a JsTestException is raised to give details about javascript errors.
'''
separator = '=' * LINE_SIZE
title = kwargs['title'] if 'title' in kwargs else 'phantomjs output'
nb_spaces = (LINE_SIZE - len(title)) // 2
if VERBOSE:
print('')
print(separator)
print(' ' * nb_spaces + title)
print(separator)
sys.stdout.flush()
with NamedTemporaryFile(delete=True) as cookies_file:
cmd = ('phantomjs', '--cookies-file=%s' % cookies_file.name) + args
if self.timeout:
cmd += (str(self.timeout),)
parser = TapParser(debug=VERBOSITY > 2)
output = self.execute(cmd)
for item in parser.parse(output):
if VERBOSE:
print(item.display())
sys.stdout.flush()
if VERBOSE:
print(separator)
sys.stdout.flush()
failures = parser.suites.get_all_failures()
if failures:
raise JsTestException('Failed javascript assertions', failures)
if self.returncode > 0:
raise JsTestException('PhantomJS return with non-zero exit code (%s)' % self.returncode) | [
"def",
"phantomjs",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"separator",
"=",
"'='",
"*",
"LINE_SIZE",
"title",
"=",
"kwargs",
"[",
"'title'",
"]",
"if",
"'title'",
"in",
"kwargs",
"else",
"'phantomjs output'",
"nb_spaces",
"=",
"(",
"LINE_SIZE",
"-",
"len",
"(",
"title",
")",
")",
"//",
"2",
"if",
"VERBOSE",
":",
"print",
"(",
"''",
")",
"print",
"(",
"separator",
")",
"print",
"(",
"' '",
"*",
"nb_spaces",
"+",
"title",
")",
"print",
"(",
"separator",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"with",
"NamedTemporaryFile",
"(",
"delete",
"=",
"True",
")",
"as",
"cookies_file",
":",
"cmd",
"=",
"(",
"'phantomjs'",
",",
"'--cookies-file=%s'",
"%",
"cookies_file",
".",
"name",
")",
"+",
"args",
"if",
"self",
".",
"timeout",
":",
"cmd",
"+=",
"(",
"str",
"(",
"self",
".",
"timeout",
")",
",",
")",
"parser",
"=",
"TapParser",
"(",
"debug",
"=",
"VERBOSITY",
">",
"2",
")",
"output",
"=",
"self",
".",
"execute",
"(",
"cmd",
")",
"for",
"item",
"in",
"parser",
".",
"parse",
"(",
"output",
")",
":",
"if",
"VERBOSE",
":",
"print",
"(",
"item",
".",
"display",
"(",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"if",
"VERBOSE",
":",
"print",
"(",
"separator",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"failures",
"=",
"parser",
".",
"suites",
".",
"get_all_failures",
"(",
")",
"if",
"failures",
":",
"raise",
"JsTestException",
"(",
"'Failed javascript assertions'",
",",
"failures",
")",
"if",
"self",
".",
"returncode",
">",
"0",
":",
"raise",
"JsTestException",
"(",
"'PhantomJS return with non-zero exit code (%s)'",
"%",
"self",
".",
"returncode",
")"
] | Execute PhantomJS by giving ``args`` as command line arguments.
If test are run in verbose mode (``-v/--verbosity`` = 2), it output:
- the title as header (with separators before and after)
- modules and test names
- assertions results (with ``django.utils.termcolors`` support)
In case of error, a JsTestException is raised to give details about javascript errors. | [
"Execute",
"PhantomJS",
"by",
"giving",
"args",
"as",
"command",
"line",
"arguments",
"."
] | train | https://github.com/noirbizarre/django.js/blob/65b267b04ffc0f969b9f8e2f8ce2f922397c8af1/djangojs/runners.py#L110-L152 |
noirbizarre/django.js | djangojs/runners.py | PhantomJsRunner.run_suite | def run_suite(self):
'''
Run a phantomjs test suite.
- ``phantomjs_runner`` is mandatory.
- Either ``url`` or ``url_name`` needs to be defined.
'''
if not self.phantomjs_runner:
raise JsTestException('phantomjs_runner need to be defined')
url = self.get_url()
self.phantomjs(self.phantomjs_runner, url, title=self.title)
self.cleanup() | python | def run_suite(self):
'''
Run a phantomjs test suite.
- ``phantomjs_runner`` is mandatory.
- Either ``url`` or ``url_name`` needs to be defined.
'''
if not self.phantomjs_runner:
raise JsTestException('phantomjs_runner need to be defined')
url = self.get_url()
self.phantomjs(self.phantomjs_runner, url, title=self.title)
self.cleanup() | [
"def",
"run_suite",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"phantomjs_runner",
":",
"raise",
"JsTestException",
"(",
"'phantomjs_runner need to be defined'",
")",
"url",
"=",
"self",
".",
"get_url",
"(",
")",
"self",
".",
"phantomjs",
"(",
"self",
".",
"phantomjs_runner",
",",
"url",
",",
"title",
"=",
"self",
".",
"title",
")",
"self",
".",
"cleanup",
"(",
")"
] | Run a phantomjs test suite.
- ``phantomjs_runner`` is mandatory.
- Either ``url`` or ``url_name`` needs to be defined. | [
"Run",
"a",
"phantomjs",
"test",
"suite",
"."
] | train | https://github.com/noirbizarre/django.js/blob/65b267b04ffc0f969b9f8e2f8ce2f922397c8af1/djangojs/runners.py#L154-L167 |
frictionlessdata/tableschema-sql-py | tableschema_sql/writer.py | Writer.write | def write(self, rows, keyed=False):
"""Write rows/keyed_rows to table
"""
for row in rows:
keyed_row = row
if not keyed:
keyed_row = dict(zip(self.__schema.field_names, row))
keyed_row = self.__convert_row(keyed_row)
if self.__check_existing(keyed_row):
for wr in self.__insert():
yield wr
ret = self.__update(keyed_row)
if ret is not None:
yield WrittenRow(keyed_row, True, ret if self.__autoincrement else None)
continue
self.__buffer.append(keyed_row)
if len(self.__buffer) > BUFFER_SIZE:
for wr in self.__insert():
yield wr
for wr in self.__insert():
yield wr | python | def write(self, rows, keyed=False):
"""Write rows/keyed_rows to table
"""
for row in rows:
keyed_row = row
if not keyed:
keyed_row = dict(zip(self.__schema.field_names, row))
keyed_row = self.__convert_row(keyed_row)
if self.__check_existing(keyed_row):
for wr in self.__insert():
yield wr
ret = self.__update(keyed_row)
if ret is not None:
yield WrittenRow(keyed_row, True, ret if self.__autoincrement else None)
continue
self.__buffer.append(keyed_row)
if len(self.__buffer) > BUFFER_SIZE:
for wr in self.__insert():
yield wr
for wr in self.__insert():
yield wr | [
"def",
"write",
"(",
"self",
",",
"rows",
",",
"keyed",
"=",
"False",
")",
":",
"for",
"row",
"in",
"rows",
":",
"keyed_row",
"=",
"row",
"if",
"not",
"keyed",
":",
"keyed_row",
"=",
"dict",
"(",
"zip",
"(",
"self",
".",
"__schema",
".",
"field_names",
",",
"row",
")",
")",
"keyed_row",
"=",
"self",
".",
"__convert_row",
"(",
"keyed_row",
")",
"if",
"self",
".",
"__check_existing",
"(",
"keyed_row",
")",
":",
"for",
"wr",
"in",
"self",
".",
"__insert",
"(",
")",
":",
"yield",
"wr",
"ret",
"=",
"self",
".",
"__update",
"(",
"keyed_row",
")",
"if",
"ret",
"is",
"not",
"None",
":",
"yield",
"WrittenRow",
"(",
"keyed_row",
",",
"True",
",",
"ret",
"if",
"self",
".",
"__autoincrement",
"else",
"None",
")",
"continue",
"self",
".",
"__buffer",
".",
"append",
"(",
"keyed_row",
")",
"if",
"len",
"(",
"self",
".",
"__buffer",
")",
">",
"BUFFER_SIZE",
":",
"for",
"wr",
"in",
"self",
".",
"__insert",
"(",
")",
":",
"yield",
"wr",
"for",
"wr",
"in",
"self",
".",
"__insert",
"(",
")",
":",
"yield",
"wr"
] | Write rows/keyed_rows to table | [
"Write",
"rows",
"/",
"keyed_rows",
"to",
"table"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/writer.py#L32-L52 |
frictionlessdata/tableschema-sql-py | tableschema_sql/writer.py | Writer.__prepare_bloom | def __prepare_bloom(self):
"""Prepare bloom for existing checks
"""
self.__bloom = pybloom_live.ScalableBloomFilter()
columns = [getattr(self.__table.c, key) for key in self.__update_keys]
keys = select(columns).execution_options(stream_results=True).execute()
for key in keys:
self.__bloom.add(tuple(key)) | python | def __prepare_bloom(self):
"""Prepare bloom for existing checks
"""
self.__bloom = pybloom_live.ScalableBloomFilter()
columns = [getattr(self.__table.c, key) for key in self.__update_keys]
keys = select(columns).execution_options(stream_results=True).execute()
for key in keys:
self.__bloom.add(tuple(key)) | [
"def",
"__prepare_bloom",
"(",
"self",
")",
":",
"self",
".",
"__bloom",
"=",
"pybloom_live",
".",
"ScalableBloomFilter",
"(",
")",
"columns",
"=",
"[",
"getattr",
"(",
"self",
".",
"__table",
".",
"c",
",",
"key",
")",
"for",
"key",
"in",
"self",
".",
"__update_keys",
"]",
"keys",
"=",
"select",
"(",
"columns",
")",
".",
"execution_options",
"(",
"stream_results",
"=",
"True",
")",
".",
"execute",
"(",
")",
"for",
"key",
"in",
"keys",
":",
"self",
".",
"__bloom",
".",
"add",
"(",
"tuple",
"(",
"key",
")",
")"
] | Prepare bloom for existing checks | [
"Prepare",
"bloom",
"for",
"existing",
"checks"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/writer.py#L56-L63 |
frictionlessdata/tableschema-sql-py | tableschema_sql/writer.py | Writer.__insert | def __insert(self):
"""Insert rows to table
"""
if len(self.__buffer) > 0:
# Insert data
statement = self.__table.insert()
if self.__autoincrement:
statement = statement.returning(
getattr(self.__table.c, self.__autoincrement))
statement = statement.values(self.__buffer)
res = statement.execute()
for id, in res:
row = self.__buffer.pop(0)
yield WrittenRow(row, False, id)
else:
statement.execute(self.__buffer)
for row in self.__buffer:
yield WrittenRow(row, False, None)
# Clean memory
self.__buffer = [] | python | def __insert(self):
"""Insert rows to table
"""
if len(self.__buffer) > 0:
# Insert data
statement = self.__table.insert()
if self.__autoincrement:
statement = statement.returning(
getattr(self.__table.c, self.__autoincrement))
statement = statement.values(self.__buffer)
res = statement.execute()
for id, in res:
row = self.__buffer.pop(0)
yield WrittenRow(row, False, id)
else:
statement.execute(self.__buffer)
for row in self.__buffer:
yield WrittenRow(row, False, None)
# Clean memory
self.__buffer = [] | [
"def",
"__insert",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"__buffer",
")",
">",
"0",
":",
"# Insert data",
"statement",
"=",
"self",
".",
"__table",
".",
"insert",
"(",
")",
"if",
"self",
".",
"__autoincrement",
":",
"statement",
"=",
"statement",
".",
"returning",
"(",
"getattr",
"(",
"self",
".",
"__table",
".",
"c",
",",
"self",
".",
"__autoincrement",
")",
")",
"statement",
"=",
"statement",
".",
"values",
"(",
"self",
".",
"__buffer",
")",
"res",
"=",
"statement",
".",
"execute",
"(",
")",
"for",
"id",
",",
"in",
"res",
":",
"row",
"=",
"self",
".",
"__buffer",
".",
"pop",
"(",
"0",
")",
"yield",
"WrittenRow",
"(",
"row",
",",
"False",
",",
"id",
")",
"else",
":",
"statement",
".",
"execute",
"(",
"self",
".",
"__buffer",
")",
"for",
"row",
"in",
"self",
".",
"__buffer",
":",
"yield",
"WrittenRow",
"(",
"row",
",",
"False",
",",
"None",
")",
"# Clean memory",
"self",
".",
"__buffer",
"=",
"[",
"]"
] | Insert rows to table | [
"Insert",
"rows",
"to",
"table"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/writer.py#L65-L84 |
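The buffered insert above relies on SQLAlchemy's executemany behaviour, plus `RETURNING` when an autoincrement column is configured. A self-contained sketch of the same idea in the implicit-execution SQLAlchemy 1.x style this module itself uses (table and column names are made up; the `RETURNING` variant is left as a comment because it needs a backend such as PostgreSQL):

```python
import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
metadata = sa.MetaData(bind=engine)
table = sa.Table('demo', metadata,
                 sa.Column('id', sa.Integer, primary_key=True),
                 sa.Column('name', sa.Text))
metadata.create_all()

buffer = [{'name': 'a'}, {'name': 'b'}]
table.insert().execute(buffer)          # executemany path, no RETURNING needed

# On PostgreSQL the autoincrement path above would instead run
#   table.insert().returning(table.c.id).values(buffer).execute()
# and iterate the result to recover the generated ids.
print(list(table.select().execute()))   # [(1, 'a'), (2, 'b')]
```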
frictionlessdata/tableschema-sql-py | tableschema_sql/writer.py | Writer.__update | def __update(self, row):
"""Update rows in table
"""
expr = self.__table.update().values(row)
for key in self.__update_keys:
expr = expr.where(getattr(self.__table.c, key) == row[key])
if self.__autoincrement:
expr = expr.returning(getattr(self.__table.c, self.__autoincrement))
res = expr.execute()
if res.rowcount > 0:
if self.__autoincrement:
first = next(iter(res))
last_row_id = first[0]
return last_row_id
return 0
return None | python | def __update(self, row):
"""Update rows in table
"""
expr = self.__table.update().values(row)
for key in self.__update_keys:
expr = expr.where(getattr(self.__table.c, key) == row[key])
if self.__autoincrement:
expr = expr.returning(getattr(self.__table.c, self.__autoincrement))
res = expr.execute()
if res.rowcount > 0:
if self.__autoincrement:
first = next(iter(res))
last_row_id = first[0]
return last_row_id
return 0
return None | [
"def",
"__update",
"(",
"self",
",",
"row",
")",
":",
"expr",
"=",
"self",
".",
"__table",
".",
"update",
"(",
")",
".",
"values",
"(",
"row",
")",
"for",
"key",
"in",
"self",
".",
"__update_keys",
":",
"expr",
"=",
"expr",
".",
"where",
"(",
"getattr",
"(",
"self",
".",
"__table",
".",
"c",
",",
"key",
")",
"==",
"row",
"[",
"key",
"]",
")",
"if",
"self",
".",
"__autoincrement",
":",
"expr",
"=",
"expr",
".",
"returning",
"(",
"getattr",
"(",
"self",
".",
"__table",
".",
"c",
",",
"self",
".",
"__autoincrement",
")",
")",
"res",
"=",
"expr",
".",
"execute",
"(",
")",
"if",
"res",
".",
"rowcount",
">",
"0",
":",
"if",
"self",
".",
"__autoincrement",
":",
"first",
"=",
"next",
"(",
"iter",
"(",
"res",
")",
")",
"last_row_id",
"=",
"first",
"[",
"0",
"]",
"return",
"last_row_id",
"return",
"0",
"return",
"None"
] | Update rows in table | [
"Update",
"rows",
"in",
"table"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/writer.py#L86-L101 |
frictionlessdata/tableschema-sql-py | tableschema_sql/writer.py | Writer.__check_existing | def __check_existing(self, row):
"""Check if row exists in table
"""
if self.__update_keys is not None:
key = tuple(row[key] for key in self.__update_keys)
if key in self.__bloom:
return True
self.__bloom.add(key)
return False
return False | python | def __check_existing(self, row):
"""Check if row exists in table
"""
if self.__update_keys is not None:
key = tuple(row[key] for key in self.__update_keys)
if key in self.__bloom:
return True
self.__bloom.add(key)
return False
return False | [
"def",
"__check_existing",
"(",
"self",
",",
"row",
")",
":",
"if",
"self",
".",
"__update_keys",
"is",
"not",
"None",
":",
"key",
"=",
"tuple",
"(",
"row",
"[",
"key",
"]",
"for",
"key",
"in",
"self",
".",
"__update_keys",
")",
"if",
"key",
"in",
"self",
".",
"__bloom",
":",
"return",
"True",
"self",
".",
"__bloom",
".",
"add",
"(",
"key",
")",
"return",
"False",
"return",
"False"
] | Check if row exists in table | [
"Check",
"if",
"row",
"exists",
"in",
"table"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/writer.py#L103-L112 |
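`__prepare_bloom` and `__check_existing` together implement a probabilistic existence check: the update-key tuples already in the table are loaded into a `pybloom_live.ScalableBloomFilter`, and incoming rows whose key is (probably) present are routed to the UPDATE path. A standalone sketch of that routing decision with the same library (rows and keys are made up):

```python
from pybloom_live import ScalableBloomFilter

seen = ScalableBloomFilter()
update_keys = ['id']
rows = [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}, {'id': 1, 'name': 'a2'}]

for row in rows:
    key = tuple(row[k] for k in update_keys)
    if key in seen:
        print('update', row)   # key already seen -> would take the UPDATE path
    else:
        seen.add(key)
        print('insert', row)   # first sighting -> would be buffered for INSERT
```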
frictionlessdata/tableschema-sql-py | tableschema_sql/storage.py | Storage.buckets | def buckets(self):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
buckets = []
for table in self.__metadata.sorted_tables:
bucket = self.__mapper.restore_bucket(table.name)
if bucket is not None:
buckets.append(bucket)
return buckets | python | def buckets(self):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
buckets = []
for table in self.__metadata.sorted_tables:
bucket = self.__mapper.restore_bucket(table.name)
if bucket is not None:
buckets.append(bucket)
return buckets | [
"def",
"buckets",
"(",
"self",
")",
":",
"buckets",
"=",
"[",
"]",
"for",
"table",
"in",
"self",
".",
"__metadata",
".",
"sorted_tables",
":",
"bucket",
"=",
"self",
".",
"__mapper",
".",
"restore_bucket",
"(",
"table",
".",
"name",
")",
"if",
"bucket",
"is",
"not",
"None",
":",
"buckets",
".",
"append",
"(",
"bucket",
")",
"return",
"buckets"
] | https://github.com/frictionlessdata/tableschema-sql-py#storage | [
"https",
":",
"//",
"github",
".",
"com",
"/",
"frictionlessdata",
"/",
"tableschema",
"-",
"sql",
"-",
"py#storage"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/storage.py#L57-L65 |
frictionlessdata/tableschema-sql-py | tableschema_sql/storage.py | Storage.create | def create(self, bucket, descriptor, force=False, indexes_fields=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
descriptors = descriptor
if isinstance(descriptor, dict):
descriptors = [descriptor]
if indexes_fields is None or len(indexes_fields) == 0:
indexes_fields = [()] * len(descriptors)
elif type(indexes_fields[0][0]) not in {list, tuple}:
indexes_fields = [indexes_fields]
# Check dimensions
if not (len(buckets) == len(descriptors) == len(indexes_fields)):
raise tableschema.exceptions.StorageError('Wrong argument dimensions')
# Check buckets for existence
for bucket in reversed(self.buckets):
if bucket in buckets:
if not force:
message = 'Bucket "%s" already exists.' % bucket
raise tableschema.exceptions.StorageError(message)
self.delete(bucket)
# Define buckets
for bucket, descriptor, index_fields in zip(buckets, descriptors, indexes_fields):
tableschema.validate(descriptor)
table_name = self.__mapper.convert_bucket(bucket)
columns, constraints, indexes, fallbacks, table_comment = self.__mapper \
.convert_descriptor(bucket, descriptor, index_fields, self.__autoincrement)
Table(table_name, self.__metadata, *(columns + constraints + indexes),
comment=table_comment)
self.__descriptors[bucket] = descriptor
self.__fallbacks[bucket] = fallbacks
# Create tables, update metadata
self.__metadata.create_all() | python | def create(self, bucket, descriptor, force=False, indexes_fields=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
descriptors = descriptor
if isinstance(descriptor, dict):
descriptors = [descriptor]
if indexes_fields is None or len(indexes_fields) == 0:
indexes_fields = [()] * len(descriptors)
elif type(indexes_fields[0][0]) not in {list, tuple}:
indexes_fields = [indexes_fields]
# Check dimensions
if not (len(buckets) == len(descriptors) == len(indexes_fields)):
raise tableschema.exceptions.StorageError('Wrong argument dimensions')
# Check buckets for existence
for bucket in reversed(self.buckets):
if bucket in buckets:
if not force:
message = 'Bucket "%s" already exists.' % bucket
raise tableschema.exceptions.StorageError(message)
self.delete(bucket)
# Define buckets
for bucket, descriptor, index_fields in zip(buckets, descriptors, indexes_fields):
tableschema.validate(descriptor)
table_name = self.__mapper.convert_bucket(bucket)
columns, constraints, indexes, fallbacks, table_comment = self.__mapper \
.convert_descriptor(bucket, descriptor, index_fields, self.__autoincrement)
Table(table_name, self.__metadata, *(columns + constraints + indexes),
comment=table_comment)
self.__descriptors[bucket] = descriptor
self.__fallbacks[bucket] = fallbacks
# Create tables, update metadata
self.__metadata.create_all() | [
"def",
"create",
"(",
"self",
",",
"bucket",
",",
"descriptor",
",",
"force",
"=",
"False",
",",
"indexes_fields",
"=",
"None",
")",
":",
"# Make lists",
"buckets",
"=",
"bucket",
"if",
"isinstance",
"(",
"bucket",
",",
"six",
".",
"string_types",
")",
":",
"buckets",
"=",
"[",
"bucket",
"]",
"descriptors",
"=",
"descriptor",
"if",
"isinstance",
"(",
"descriptor",
",",
"dict",
")",
":",
"descriptors",
"=",
"[",
"descriptor",
"]",
"if",
"indexes_fields",
"is",
"None",
"or",
"len",
"(",
"indexes_fields",
")",
"==",
"0",
":",
"indexes_fields",
"=",
"[",
"(",
")",
"]",
"*",
"len",
"(",
"descriptors",
")",
"elif",
"type",
"(",
"indexes_fields",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"not",
"in",
"{",
"list",
",",
"tuple",
"}",
":",
"indexes_fields",
"=",
"[",
"indexes_fields",
"]",
"# Check dimensions",
"if",
"not",
"(",
"len",
"(",
"buckets",
")",
"==",
"len",
"(",
"descriptors",
")",
"==",
"len",
"(",
"indexes_fields",
")",
")",
":",
"raise",
"tableschema",
".",
"exceptions",
".",
"StorageError",
"(",
"'Wrong argument dimensions'",
")",
"# Check buckets for existence",
"for",
"bucket",
"in",
"reversed",
"(",
"self",
".",
"buckets",
")",
":",
"if",
"bucket",
"in",
"buckets",
":",
"if",
"not",
"force",
":",
"message",
"=",
"'Bucket \"%s\" already exists.'",
"%",
"bucket",
"raise",
"tableschema",
".",
"exceptions",
".",
"StorageError",
"(",
"message",
")",
"self",
".",
"delete",
"(",
"bucket",
")",
"# Define buckets",
"for",
"bucket",
",",
"descriptor",
",",
"index_fields",
"in",
"zip",
"(",
"buckets",
",",
"descriptors",
",",
"indexes_fields",
")",
":",
"tableschema",
".",
"validate",
"(",
"descriptor",
")",
"table_name",
"=",
"self",
".",
"__mapper",
".",
"convert_bucket",
"(",
"bucket",
")",
"columns",
",",
"constraints",
",",
"indexes",
",",
"fallbacks",
",",
"table_comment",
"=",
"self",
".",
"__mapper",
".",
"convert_descriptor",
"(",
"bucket",
",",
"descriptor",
",",
"index_fields",
",",
"self",
".",
"__autoincrement",
")",
"Table",
"(",
"table_name",
",",
"self",
".",
"__metadata",
",",
"*",
"(",
"columns",
"+",
"constraints",
"+",
"indexes",
")",
",",
"comment",
"=",
"table_comment",
")",
"self",
".",
"__descriptors",
"[",
"bucket",
"]",
"=",
"descriptor",
"self",
".",
"__fallbacks",
"[",
"bucket",
"]",
"=",
"fallbacks",
"# Create tables, update metadata",
"self",
".",
"__metadata",
".",
"create_all",
"(",
")"
] | https://github.com/frictionlessdata/tableschema-sql-py#storage | [
"https",
":",
"//",
"github",
".",
"com",
"/",
"frictionlessdata",
"/",
"tableschema",
"-",
"sql",
"-",
"py#storage"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/storage.py#L67-L107 |
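A hypothetical end-to-end use of `Storage.create` with a Table Schema descriptor. The engine URL, prefix, bucket name and fields are all made up, and the constructor keywords follow the project's documented `Storage(engine=..., prefix=...)` form:

```python
from sqlalchemy import create_engine
from tableschema_sql import Storage

storage = Storage(engine=create_engine('sqlite://'), prefix='demo_')

descriptor = {
    'fields': [
        {'name': 'id', 'type': 'integer'},
        {'name': 'name', 'type': 'string'},
    ],
    'primaryKey': 'id',
}
storage.create('articles', descriptor, force=True)
print(storage.buckets)  # -> ['articles']
```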
frictionlessdata/tableschema-sql-py | tableschema_sql/storage.py | Storage.delete | def delete(self, bucket=None, ignore=False):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
elif bucket is None:
buckets = reversed(self.buckets)
# Iterate
tables = []
for bucket in buckets:
# Check existent
if bucket not in self.buckets:
if not ignore:
message = 'Bucket "%s" doesn\'t exist.' % bucket
raise tableschema.exceptions.StorageError(message)
return
# Remove from buckets
if bucket in self.__descriptors:
del self.__descriptors[bucket]
# Add table to tables
table = self.__get_table(bucket)
tables.append(table)
# Drop tables, update metadata
self.__metadata.drop_all(tables=tables)
self.__metadata.clear()
self.__reflect() | python | def delete(self, bucket=None, ignore=False):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
elif bucket is None:
buckets = reversed(self.buckets)
# Iterate
tables = []
for bucket in buckets:
# Check existent
if bucket not in self.buckets:
if not ignore:
message = 'Bucket "%s" doesn\'t exist.' % bucket
raise tableschema.exceptions.StorageError(message)
return
# Remove from buckets
if bucket in self.__descriptors:
del self.__descriptors[bucket]
# Add table to tables
table = self.__get_table(bucket)
tables.append(table)
# Drop tables, update metadata
self.__metadata.drop_all(tables=tables)
self.__metadata.clear()
self.__reflect() | [
"def",
"delete",
"(",
"self",
",",
"bucket",
"=",
"None",
",",
"ignore",
"=",
"False",
")",
":",
"# Make lists",
"buckets",
"=",
"bucket",
"if",
"isinstance",
"(",
"bucket",
",",
"six",
".",
"string_types",
")",
":",
"buckets",
"=",
"[",
"bucket",
"]",
"elif",
"bucket",
"is",
"None",
":",
"buckets",
"=",
"reversed",
"(",
"self",
".",
"buckets",
")",
"# Iterate",
"tables",
"=",
"[",
"]",
"for",
"bucket",
"in",
"buckets",
":",
"# Check existent",
"if",
"bucket",
"not",
"in",
"self",
".",
"buckets",
":",
"if",
"not",
"ignore",
":",
"message",
"=",
"'Bucket \"%s\" doesn\\'t exist.'",
"%",
"bucket",
"raise",
"tableschema",
".",
"exceptions",
".",
"StorageError",
"(",
"message",
")",
"return",
"# Remove from buckets",
"if",
"bucket",
"in",
"self",
".",
"__descriptors",
":",
"del",
"self",
".",
"__descriptors",
"[",
"bucket",
"]",
"# Add table to tables",
"table",
"=",
"self",
".",
"__get_table",
"(",
"bucket",
")",
"tables",
".",
"append",
"(",
"table",
")",
"# Drop tables, update metadata",
"self",
".",
"__metadata",
".",
"drop_all",
"(",
"tables",
"=",
"tables",
")",
"self",
".",
"__metadata",
".",
"clear",
"(",
")",
"self",
".",
"__reflect",
"(",
")"
] | https://github.com/frictionlessdata/tableschema-sql-py#storage | [
"https",
":",
"//",
"github",
".",
"com",
"/",
"frictionlessdata",
"/",
"tableschema",
"-",
"sql",
"-",
"py#storage"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/storage.py#L109-L142 |
frictionlessdata/tableschema-sql-py | tableschema_sql/storage.py | Storage.describe | def describe(self, bucket, descriptor=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Set descriptor
if descriptor is not None:
self.__descriptors[bucket] = descriptor
# Get descriptor
else:
descriptor = self.__descriptors.get(bucket)
if descriptor is None:
table = self.__get_table(bucket)
descriptor = self.__mapper.restore_descriptor(
table.name, table.columns, table.constraints, self.__autoincrement)
return descriptor | python | def describe(self, bucket, descriptor=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Set descriptor
if descriptor is not None:
self.__descriptors[bucket] = descriptor
# Get descriptor
else:
descriptor = self.__descriptors.get(bucket)
if descriptor is None:
table = self.__get_table(bucket)
descriptor = self.__mapper.restore_descriptor(
table.name, table.columns, table.constraints, self.__autoincrement)
return descriptor | [
"def",
"describe",
"(",
"self",
",",
"bucket",
",",
"descriptor",
"=",
"None",
")",
":",
"# Set descriptor",
"if",
"descriptor",
"is",
"not",
"None",
":",
"self",
".",
"__descriptors",
"[",
"bucket",
"]",
"=",
"descriptor",
"# Get descriptor",
"else",
":",
"descriptor",
"=",
"self",
".",
"__descriptors",
".",
"get",
"(",
"bucket",
")",
"if",
"descriptor",
"is",
"None",
":",
"table",
"=",
"self",
".",
"__get_table",
"(",
"bucket",
")",
"descriptor",
"=",
"self",
".",
"__mapper",
".",
"restore_descriptor",
"(",
"table",
".",
"name",
",",
"table",
".",
"columns",
",",
"table",
".",
"constraints",
",",
"self",
".",
"__autoincrement",
")",
"return",
"descriptor"
] | https://github.com/frictionlessdata/tableschema-sql-py#storage | [
"https",
":",
"//",
"github",
".",
"com",
"/",
"frictionlessdata",
"/",
"tableschema",
"-",
"sql",
"-",
"py#storage"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/storage.py#L144-L160 |
frictionlessdata/tableschema-sql-py | tableschema_sql/storage.py | Storage.iter | def iter(self, bucket):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Get table and fallbacks
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
# Open and close transaction
with self.__connection.begin():
            # Streaming may not work for some backends:
# http://docs.sqlalchemy.org/en/latest/core/connections.html
select = table.select().execution_options(stream_results=True)
result = select.execute()
for row in result:
row = self.__mapper.restore_row(row, schema=schema)
yield row | python | def iter(self, bucket):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Get table and fallbacks
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
# Open and close transaction
with self.__connection.begin():
            # Streaming may not work for some backends:
# http://docs.sqlalchemy.org/en/latest/core/connections.html
select = table.select().execution_options(stream_results=True)
result = select.execute()
for row in result:
row = self.__mapper.restore_row(row, schema=schema)
yield row | [
"def",
"iter",
"(",
"self",
",",
"bucket",
")",
":",
"# Get table and fallbacks",
"table",
"=",
"self",
".",
"__get_table",
"(",
"bucket",
")",
"schema",
"=",
"tableschema",
".",
"Schema",
"(",
"self",
".",
"describe",
"(",
"bucket",
")",
")",
"# Open and close transaction",
"with",
"self",
".",
"__connection",
".",
"begin",
"(",
")",
":",
"# Streaming could be not working for some backends:",
"# http://docs.sqlalchemy.org/en/latest/core/connections.html",
"select",
"=",
"table",
".",
"select",
"(",
")",
".",
"execution_options",
"(",
"stream_results",
"=",
"True",
")",
"result",
"=",
"select",
".",
"execute",
"(",
")",
"for",
"row",
"in",
"result",
":",
"row",
"=",
"self",
".",
"__mapper",
".",
"restore_row",
"(",
"row",
",",
"schema",
"=",
"schema",
")",
"yield",
"row"
] | https://github.com/frictionlessdata/tableschema-sql-py#storage | [
"https",
":",
"//",
"github",
".",
"com",
"/",
"frictionlessdata",
"/",
"tableschema",
"-",
"sql",
"-",
"py#storage"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/storage.py#L162-L178 |
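Rows written into a bucket can be streamed back with `iter`; each row is cast through Table Schema on the way out. A self-contained round trip under the same assumptions as the sketch after `Storage.create` above (bucket, fields and rows are made up):

```python
from sqlalchemy import create_engine
from tableschema_sql import Storage

storage = Storage(engine=create_engine('sqlite://'))
storage.create('articles', {'fields': [{'name': 'id', 'type': 'integer'},
                                       {'name': 'name', 'type': 'string'}]})
storage.write('articles', [[1, 'first'], [2, 'second']])

for row in storage.iter('articles'):
    print(row)  # e.g. [1, 'first'], already cast back to schema types
```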
frictionlessdata/tableschema-sql-py | tableschema_sql/storage.py | Storage.write | def write(self, bucket, rows, keyed=False, as_generator=False, update_keys=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Check update keys
if update_keys is not None and len(update_keys) == 0:
message = 'Argument "update_keys" cannot be an empty list'
raise tableschema.exceptions.StorageError(message)
# Get table and description
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
fallbacks = self.__fallbacks.get(bucket, [])
# Write rows to table
convert_row = partial(self.__mapper.convert_row, schema=schema, fallbacks=fallbacks)
writer = Writer(table, schema, update_keys, self.__autoincrement, convert_row)
with self.__connection.begin():
gen = writer.write(rows, keyed=keyed)
if as_generator:
return gen
collections.deque(gen, maxlen=0) | python | def write(self, bucket, rows, keyed=False, as_generator=False, update_keys=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Check update keys
if update_keys is not None and len(update_keys) == 0:
message = 'Argument "update_keys" cannot be an empty list'
raise tableschema.exceptions.StorageError(message)
# Get table and description
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
fallbacks = self.__fallbacks.get(bucket, [])
# Write rows to table
convert_row = partial(self.__mapper.convert_row, schema=schema, fallbacks=fallbacks)
writer = Writer(table, schema, update_keys, self.__autoincrement, convert_row)
with self.__connection.begin():
gen = writer.write(rows, keyed=keyed)
if as_generator:
return gen
collections.deque(gen, maxlen=0) | [
"def",
"write",
"(",
"self",
",",
"bucket",
",",
"rows",
",",
"keyed",
"=",
"False",
",",
"as_generator",
"=",
"False",
",",
"update_keys",
"=",
"None",
")",
":",
"# Check update keys",
"if",
"update_keys",
"is",
"not",
"None",
"and",
"len",
"(",
"update_keys",
")",
"==",
"0",
":",
"message",
"=",
"'Argument \"update_keys\" cannot be an empty list'",
"raise",
"tableschema",
".",
"exceptions",
".",
"StorageError",
"(",
"message",
")",
"# Get table and description",
"table",
"=",
"self",
".",
"__get_table",
"(",
"bucket",
")",
"schema",
"=",
"tableschema",
".",
"Schema",
"(",
"self",
".",
"describe",
"(",
"bucket",
")",
")",
"fallbacks",
"=",
"self",
".",
"__fallbacks",
".",
"get",
"(",
"bucket",
",",
"[",
"]",
")",
"# Write rows to table",
"convert_row",
"=",
"partial",
"(",
"self",
".",
"__mapper",
".",
"convert_row",
",",
"schema",
"=",
"schema",
",",
"fallbacks",
"=",
"fallbacks",
")",
"writer",
"=",
"Writer",
"(",
"table",
",",
"schema",
",",
"update_keys",
",",
"self",
".",
"__autoincrement",
",",
"convert_row",
")",
"with",
"self",
".",
"__connection",
".",
"begin",
"(",
")",
":",
"gen",
"=",
"writer",
".",
"write",
"(",
"rows",
",",
"keyed",
"=",
"keyed",
")",
"if",
"as_generator",
":",
"return",
"gen",
"collections",
".",
"deque",
"(",
"gen",
",",
"maxlen",
"=",
"0",
")"
] | https://github.com/frictionlessdata/tableschema-sql-py#storage | [
"https",
":",
"//",
"github",
".",
"com",
"/",
"frictionlessdata",
"/",
"tableschema",
"-",
"sql",
"-",
"py#storage"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/storage.py#L186-L207 |
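Passing `update_keys` turns `write` into an upsert, and `as_generator=True` exposes the per-row `WrittenRow` results instead of draining them silently. A sketch under the same made-up bucket as before:

```python
from sqlalchemy import create_engine
from tableschema_sql import Storage

storage = Storage(engine=create_engine('sqlite://'))
storage.create('articles', {'fields': [{'name': 'id', 'type': 'integer'},
                                       {'name': 'name', 'type': 'string'}]})
storage.write('articles', [[1, 'first']])

# A row sharing the 'id' key now takes the UPDATE path instead of duplicating.
results = storage.write('articles', [[1, 'first (edited)'], [2, 'second']],
                        update_keys=['id'], as_generator=True)
for written_row in results:
    print(written_row)
```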
frictionlessdata/tableschema-sql-py | tableschema_sql/storage.py | Storage.__get_table | def __get_table(self, bucket):
"""Get table by bucket
"""
table_name = self.__mapper.convert_bucket(bucket)
if self.__dbschema:
table_name = '.'.join((self.__dbschema, table_name))
return self.__metadata.tables[table_name] | python | def __get_table(self, bucket):
"""Get table by bucket
"""
table_name = self.__mapper.convert_bucket(bucket)
if self.__dbschema:
table_name = '.'.join((self.__dbschema, table_name))
return self.__metadata.tables[table_name] | [
"def",
"__get_table",
"(",
"self",
",",
"bucket",
")",
":",
"table_name",
"=",
"self",
".",
"__mapper",
".",
"convert_bucket",
"(",
"bucket",
")",
"if",
"self",
".",
"__dbschema",
":",
"table_name",
"=",
"'.'",
".",
"join",
"(",
"(",
"self",
".",
"__dbschema",
",",
"table_name",
")",
")",
"return",
"self",
".",
"__metadata",
".",
"tables",
"[",
"table_name",
"]"
] | Get table by bucket | [
"Get",
"table",
"by",
"bucket"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/storage.py#L211-L217 |
frictionlessdata/tableschema-sql-py | tableschema_sql/storage.py | Storage.__reflect | def __reflect(self):
"""Reflect metadata
"""
def only(name, _):
return self.__only(name) and self.__mapper.restore_bucket(name) is not None
self.__metadata.reflect(only=only) | python | def __reflect(self):
"""Reflect metadata
"""
def only(name, _):
return self.__only(name) and self.__mapper.restore_bucket(name) is not None
self.__metadata.reflect(only=only) | [
"def",
"__reflect",
"(",
"self",
")",
":",
"def",
"only",
"(",
"name",
",",
"_",
")",
":",
"return",
"self",
".",
"__only",
"(",
"name",
")",
"and",
"self",
".",
"__mapper",
".",
"restore_bucket",
"(",
"name",
")",
"is",
"not",
"None",
"self",
".",
"__metadata",
".",
"reflect",
"(",
"only",
"=",
"only",
")"
] | Reflect metadata | [
"Reflect",
"metadata"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/storage.py#L219-L226 |
frictionlessdata/tableschema-sql-py | tableschema_sql/mapper.py | _get_field_comment | def _get_field_comment(field, separator=' - '):
"""
Create SQL comment from field's title and description
:param field: tableschema-py Field, with optional 'title' and 'description' values
:param separator:
:return:
>>> _get_field_comment(tableschema.Field({'title': 'my_title', 'description': 'my_desc'}))
'my_title - my_desc'
>>> _get_field_comment(tableschema.Field({'title': 'my_title', 'description': None}))
'my_title'
>>> _get_field_comment(tableschema.Field({'title': '', 'description': 'my_description'}))
'my_description'
>>> _get_field_comment(tableschema.Field({}))
''
"""
title = field.descriptor.get('title') or ''
description = field.descriptor.get('description') or ''
return _get_comment(description, title, separator) | python | def _get_field_comment(field, separator=' - '):
"""
Create SQL comment from field's title and description
:param field: tableschema-py Field, with optional 'title' and 'description' values
:param separator:
:return:
>>> _get_field_comment(tableschema.Field({'title': 'my_title', 'description': 'my_desc'}))
'my_title - my_desc'
>>> _get_field_comment(tableschema.Field({'title': 'my_title', 'description': None}))
'my_title'
>>> _get_field_comment(tableschema.Field({'title': '', 'description': 'my_description'}))
'my_description'
>>> _get_field_comment(tableschema.Field({}))
''
"""
title = field.descriptor.get('title') or ''
description = field.descriptor.get('description') or ''
return _get_comment(description, title, separator) | [
"def",
"_get_field_comment",
"(",
"field",
",",
"separator",
"=",
"' - '",
")",
":",
"title",
"=",
"field",
".",
"descriptor",
".",
"get",
"(",
"'title'",
")",
"or",
"''",
"description",
"=",
"field",
".",
"descriptor",
".",
"get",
"(",
"'description'",
")",
"or",
"''",
"return",
"_get_comment",
"(",
"description",
",",
"title",
",",
"separator",
")"
] | Create SQL comment from field's title and description
:param field: tableschema-py Field, with optional 'title' and 'description' values
:param separator:
:return:
>>> _get_field_comment(tableschema.Field({'title': 'my_title', 'description': 'my_desc'}))
'my_title - my_desc'
>>> _get_field_comment(tableschema.Field({'title': 'my_title', 'description': None}))
'my_title'
>>> _get_field_comment(tableschema.Field({'title': '', 'description': 'my_description'}))
'my_description'
>>> _get_field_comment(tableschema.Field({}))
'' | [
"Create",
"SQL",
"comment",
"from",
"field",
"s",
"title",
"and",
"description"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/mapper.py#L281-L300 |
frictionlessdata/tableschema-sql-py | tableschema_sql/mapper.py | Mapper.convert_descriptor | def convert_descriptor(self, bucket, descriptor, index_fields=[], autoincrement=None):
"""Convert descriptor to SQL
"""
# Prepare
columns = []
indexes = []
fallbacks = []
constraints = []
column_mapping = {}
table_name = self.convert_bucket(bucket)
table_comment = _get_comment(descriptor.get('title', ''), descriptor.get('description', ''))
schema = tableschema.Schema(descriptor)
# Autoincrement
if autoincrement is not None:
columns.append(sa.Column(
autoincrement, sa.Integer, autoincrement=True, nullable=False))
# Fields
for field in schema.fields:
column_type = self.convert_type(field.type)
if not column_type:
column_type = sa.Text
fallbacks.append(field.name)
nullable = not field.required
table_comment = _get_field_comment(field)
unique = field.constraints.get('unique', False)
column = sa.Column(field.name, column_type, nullable=nullable, comment=table_comment,
unique=unique)
columns.append(column)
column_mapping[field.name] = column
# Primary key
pk = descriptor.get('primaryKey', None)
if pk is not None:
if isinstance(pk, six.string_types):
pk = [pk]
if autoincrement is not None:
if pk is not None:
pk = [autoincrement] + pk
else:
pk = [autoincrement]
if pk is not None:
constraint = sa.PrimaryKeyConstraint(*pk)
constraints.append(constraint)
# Foreign keys
if self.__dialect == 'postgresql':
fks = descriptor.get('foreignKeys', [])
for fk in fks:
fields = fk['fields']
resource = fk['reference']['resource']
foreign_fields = fk['reference']['fields']
if isinstance(fields, six.string_types):
fields = [fields]
if resource != '':
table_name = self.convert_bucket(resource)
if isinstance(foreign_fields, six.string_types):
foreign_fields = [foreign_fields]
composer = lambda field: '.'.join([table_name, field])
foreign_fields = list(map(composer, foreign_fields))
constraint = sa.ForeignKeyConstraint(fields, foreign_fields)
constraints.append(constraint)
# Indexes
if self.__dialect == 'postgresql':
for index, index_definition in enumerate(index_fields):
name = table_name + '_ix%03d' % index
index_columns = [column_mapping[field] for field in index_definition]
indexes.append(sa.Index(name, *index_columns))
return columns, constraints, indexes, fallbacks, table_comment | python | def convert_descriptor(self, bucket, descriptor, index_fields=[], autoincrement=None):
"""Convert descriptor to SQL
"""
# Prepare
columns = []
indexes = []
fallbacks = []
constraints = []
column_mapping = {}
table_name = self.convert_bucket(bucket)
table_comment = _get_comment(descriptor.get('title', ''), descriptor.get('description', ''))
schema = tableschema.Schema(descriptor)
# Autoincrement
if autoincrement is not None:
columns.append(sa.Column(
autoincrement, sa.Integer, autoincrement=True, nullable=False))
# Fields
for field in schema.fields:
column_type = self.convert_type(field.type)
if not column_type:
column_type = sa.Text
fallbacks.append(field.name)
nullable = not field.required
table_comment = _get_field_comment(field)
unique = field.constraints.get('unique', False)
column = sa.Column(field.name, column_type, nullable=nullable, comment=table_comment,
unique=unique)
columns.append(column)
column_mapping[field.name] = column
# Primary key
pk = descriptor.get('primaryKey', None)
if pk is not None:
if isinstance(pk, six.string_types):
pk = [pk]
if autoincrement is not None:
if pk is not None:
pk = [autoincrement] + pk
else:
pk = [autoincrement]
if pk is not None:
constraint = sa.PrimaryKeyConstraint(*pk)
constraints.append(constraint)
# Foreign keys
if self.__dialect == 'postgresql':
fks = descriptor.get('foreignKeys', [])
for fk in fks:
fields = fk['fields']
resource = fk['reference']['resource']
foreign_fields = fk['reference']['fields']
if isinstance(fields, six.string_types):
fields = [fields]
if resource != '':
table_name = self.convert_bucket(resource)
if isinstance(foreign_fields, six.string_types):
foreign_fields = [foreign_fields]
composer = lambda field: '.'.join([table_name, field])
foreign_fields = list(map(composer, foreign_fields))
constraint = sa.ForeignKeyConstraint(fields, foreign_fields)
constraints.append(constraint)
# Indexes
if self.__dialect == 'postgresql':
for index, index_definition in enumerate(index_fields):
name = table_name + '_ix%03d' % index
index_columns = [column_mapping[field] for field in index_definition]
indexes.append(sa.Index(name, *index_columns))
return columns, constraints, indexes, fallbacks, table_comment | [
"def",
"convert_descriptor",
"(",
"self",
",",
"bucket",
",",
"descriptor",
",",
"index_fields",
"=",
"[",
"]",
",",
"autoincrement",
"=",
"None",
")",
":",
"# Prepare",
"columns",
"=",
"[",
"]",
"indexes",
"=",
"[",
"]",
"fallbacks",
"=",
"[",
"]",
"constraints",
"=",
"[",
"]",
"column_mapping",
"=",
"{",
"}",
"table_name",
"=",
"self",
".",
"convert_bucket",
"(",
"bucket",
")",
"table_comment",
"=",
"_get_comment",
"(",
"descriptor",
".",
"get",
"(",
"'title'",
",",
"''",
")",
",",
"descriptor",
".",
"get",
"(",
"'description'",
",",
"''",
")",
")",
"schema",
"=",
"tableschema",
".",
"Schema",
"(",
"descriptor",
")",
"# Autoincrement",
"if",
"autoincrement",
"is",
"not",
"None",
":",
"columns",
".",
"append",
"(",
"sa",
".",
"Column",
"(",
"autoincrement",
",",
"sa",
".",
"Integer",
",",
"autoincrement",
"=",
"True",
",",
"nullable",
"=",
"False",
")",
")",
"# Fields",
"for",
"field",
"in",
"schema",
".",
"fields",
":",
"column_type",
"=",
"self",
".",
"convert_type",
"(",
"field",
".",
"type",
")",
"if",
"not",
"column_type",
":",
"column_type",
"=",
"sa",
".",
"Text",
"fallbacks",
".",
"append",
"(",
"field",
".",
"name",
")",
"nullable",
"=",
"not",
"field",
".",
"required",
"table_comment",
"=",
"_get_field_comment",
"(",
"field",
")",
"unique",
"=",
"field",
".",
"constraints",
".",
"get",
"(",
"'unique'",
",",
"False",
")",
"column",
"=",
"sa",
".",
"Column",
"(",
"field",
".",
"name",
",",
"column_type",
",",
"nullable",
"=",
"nullable",
",",
"comment",
"=",
"table_comment",
",",
"unique",
"=",
"unique",
")",
"columns",
".",
"append",
"(",
"column",
")",
"column_mapping",
"[",
"field",
".",
"name",
"]",
"=",
"column",
"# Primary key",
"pk",
"=",
"descriptor",
".",
"get",
"(",
"'primaryKey'",
",",
"None",
")",
"if",
"pk",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"pk",
",",
"six",
".",
"string_types",
")",
":",
"pk",
"=",
"[",
"pk",
"]",
"if",
"autoincrement",
"is",
"not",
"None",
":",
"if",
"pk",
"is",
"not",
"None",
":",
"pk",
"=",
"[",
"autoincrement",
"]",
"+",
"pk",
"else",
":",
"pk",
"=",
"[",
"autoincrement",
"]",
"if",
"pk",
"is",
"not",
"None",
":",
"constraint",
"=",
"sa",
".",
"PrimaryKeyConstraint",
"(",
"*",
"pk",
")",
"constraints",
".",
"append",
"(",
"constraint",
")",
"# Foreign keys",
"if",
"self",
".",
"__dialect",
"==",
"'postgresql'",
":",
"fks",
"=",
"descriptor",
".",
"get",
"(",
"'foreignKeys'",
",",
"[",
"]",
")",
"for",
"fk",
"in",
"fks",
":",
"fields",
"=",
"fk",
"[",
"'fields'",
"]",
"resource",
"=",
"fk",
"[",
"'reference'",
"]",
"[",
"'resource'",
"]",
"foreign_fields",
"=",
"fk",
"[",
"'reference'",
"]",
"[",
"'fields'",
"]",
"if",
"isinstance",
"(",
"fields",
",",
"six",
".",
"string_types",
")",
":",
"fields",
"=",
"[",
"fields",
"]",
"if",
"resource",
"!=",
"''",
":",
"table_name",
"=",
"self",
".",
"convert_bucket",
"(",
"resource",
")",
"if",
"isinstance",
"(",
"foreign_fields",
",",
"six",
".",
"string_types",
")",
":",
"foreign_fields",
"=",
"[",
"foreign_fields",
"]",
"composer",
"=",
"lambda",
"field",
":",
"'.'",
".",
"join",
"(",
"[",
"table_name",
",",
"field",
"]",
")",
"foreign_fields",
"=",
"list",
"(",
"map",
"(",
"composer",
",",
"foreign_fields",
")",
")",
"constraint",
"=",
"sa",
".",
"ForeignKeyConstraint",
"(",
"fields",
",",
"foreign_fields",
")",
"constraints",
".",
"append",
"(",
"constraint",
")",
"# Indexes",
"if",
"self",
".",
"__dialect",
"==",
"'postgresql'",
":",
"for",
"index",
",",
"index_definition",
"in",
"enumerate",
"(",
"index_fields",
")",
":",
"name",
"=",
"table_name",
"+",
"'_ix%03d'",
"%",
"index",
"index_columns",
"=",
"[",
"column_mapping",
"[",
"field",
"]",
"for",
"field",
"in",
"index_definition",
"]",
"indexes",
".",
"append",
"(",
"sa",
".",
"Index",
"(",
"name",
",",
"*",
"index_columns",
")",
")",
"return",
"columns",
",",
"constraints",
",",
"indexes",
",",
"fallbacks",
",",
"table_comment"
] | Convert descriptor to SQL | [
"Convert",
"descriptor",
"to",
"SQL"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/mapper.py#L32-L104 |
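The core of `convert_descriptor` is a field-by-field mapping from Table Schema types to SQLAlchemy column types, with unknown types falling back to `TEXT`. A reduced, default-dialect sketch of that mapping (a handful of types only, not the full `Mapper`):

```python
import sqlalchemy as sa

TYPE_MAP = {'integer': sa.Integer, 'number': sa.Float, 'string': sa.Text,
            'boolean': sa.Boolean, 'date': sa.Date, 'datetime': sa.DateTime}

def field_to_column(field):
    """Build a sqlalchemy.Column from a Table Schema field descriptor."""
    column_type = TYPE_MAP.get(field['type'], sa.Text)  # unknown types fall back to TEXT
    required = field.get('constraints', {}).get('required', False)
    return sa.Column(field['name'], column_type, nullable=not required)

col = field_to_column({'name': 'id', 'type': 'integer',
                       'constraints': {'required': True}})
print(col.name, col.type, col.nullable)  # id INTEGER False
```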
frictionlessdata/tableschema-sql-py | tableschema_sql/mapper.py | Mapper.convert_row | def convert_row(self, keyed_row, schema, fallbacks):
"""Convert row to SQL
"""
for key, value in list(keyed_row.items()):
field = schema.get_field(key)
if not field:
del keyed_row[key]
if key in fallbacks:
value = _uncast_value(value, field=field)
else:
value = field.cast_value(value)
keyed_row[key] = value
return keyed_row | python | def convert_row(self, keyed_row, schema, fallbacks):
"""Convert row to SQL
"""
for key, value in list(keyed_row.items()):
field = schema.get_field(key)
if not field:
del keyed_row[key]
if key in fallbacks:
value = _uncast_value(value, field=field)
else:
value = field.cast_value(value)
keyed_row[key] = value
return keyed_row | [
"def",
"convert_row",
"(",
"self",
",",
"keyed_row",
",",
"schema",
",",
"fallbacks",
")",
":",
"for",
"key",
",",
"value",
"in",
"list",
"(",
"keyed_row",
".",
"items",
"(",
")",
")",
":",
"field",
"=",
"schema",
".",
"get_field",
"(",
"key",
")",
"if",
"not",
"field",
":",
"del",
"keyed_row",
"[",
"key",
"]",
"if",
"key",
"in",
"fallbacks",
":",
"value",
"=",
"_uncast_value",
"(",
"value",
",",
"field",
"=",
"field",
")",
"else",
":",
"value",
"=",
"field",
".",
"cast_value",
"(",
"value",
")",
"keyed_row",
"[",
"key",
"]",
"=",
"value",
"return",
"keyed_row"
] | Convert row to SQL | [
"Convert",
"row",
"to",
"SQL"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/mapper.py#L106-L118 |
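Both `convert_row` and `restore_row` lean on `tableschema`'s per-field `cast_value`. A small standalone sketch of that casting step (the descriptor and raw values are made up):

```python
import tableschema

schema = tableschema.Schema({'fields': [{'name': 'id', 'type': 'integer'},
                                        {'name': 'born', 'type': 'date'}]})
raw = {'id': '42', 'born': '2015-01-01'}
cast = {key: schema.get_field(key).cast_value(value) for key, value in raw.items()}
print(cast)  # {'id': 42, 'born': datetime.date(2015, 1, 1)}
```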
frictionlessdata/tableschema-sql-py | tableschema_sql/mapper.py | Mapper.convert_type | def convert_type(self, type):
"""Convert type to SQL
"""
# Default dialect
mapping = {
'any': sa.Text,
'array': None,
'boolean': sa.Boolean,
'date': sa.Date,
'datetime': sa.DateTime,
'duration': None,
'geojson': None,
'geopoint': None,
'integer': sa.Integer,
'number': sa.Float,
'object': None,
'string': sa.Text,
'time': sa.Time,
'year': sa.Integer,
'yearmonth': None,
}
# Postgresql dialect
if self.__dialect == 'postgresql':
mapping.update({
'array': JSONB,
'geojson': JSONB,
'number': sa.Numeric,
'object': JSONB,
})
# Not supported type
if type not in mapping:
message = 'Field type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return mapping[type] | python | def convert_type(self, type):
"""Convert type to SQL
"""
# Default dialect
mapping = {
'any': sa.Text,
'array': None,
'boolean': sa.Boolean,
'date': sa.Date,
'datetime': sa.DateTime,
'duration': None,
'geojson': None,
'geopoint': None,
'integer': sa.Integer,
'number': sa.Float,
'object': None,
'string': sa.Text,
'time': sa.Time,
'year': sa.Integer,
'yearmonth': None,
}
# Postgresql dialect
if self.__dialect == 'postgresql':
mapping.update({
'array': JSONB,
'geojson': JSONB,
'number': sa.Numeric,
'object': JSONB,
})
# Not supported type
if type not in mapping:
message = 'Field type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return mapping[type] | [
"def",
"convert_type",
"(",
"self",
",",
"type",
")",
":",
"# Default dialect",
"mapping",
"=",
"{",
"'any'",
":",
"sa",
".",
"Text",
",",
"'array'",
":",
"None",
",",
"'boolean'",
":",
"sa",
".",
"Boolean",
",",
"'date'",
":",
"sa",
".",
"Date",
",",
"'datetime'",
":",
"sa",
".",
"DateTime",
",",
"'duration'",
":",
"None",
",",
"'geojson'",
":",
"None",
",",
"'geopoint'",
":",
"None",
",",
"'integer'",
":",
"sa",
".",
"Integer",
",",
"'number'",
":",
"sa",
".",
"Float",
",",
"'object'",
":",
"None",
",",
"'string'",
":",
"sa",
".",
"Text",
",",
"'time'",
":",
"sa",
".",
"Time",
",",
"'year'",
":",
"sa",
".",
"Integer",
",",
"'yearmonth'",
":",
"None",
",",
"}",
"# Postgresql dialect",
"if",
"self",
".",
"__dialect",
"==",
"'postgresql'",
":",
"mapping",
".",
"update",
"(",
"{",
"'array'",
":",
"JSONB",
",",
"'geojson'",
":",
"JSONB",
",",
"'number'",
":",
"sa",
".",
"Numeric",
",",
"'object'",
":",
"JSONB",
",",
"}",
")",
"# Not supported type",
"if",
"type",
"not",
"in",
"mapping",
":",
"message",
"=",
"'Field type \"%s\" is not supported'",
"raise",
"tableschema",
".",
"exceptions",
".",
"StorageError",
"(",
"message",
"%",
"type",
")",
"return",
"mapping",
"[",
"type",
"]"
] | Convert type to SQL | [
"Convert",
"type",
"to",
"SQL"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/mapper.py#L120-L157 |
frictionlessdata/tableschema-sql-py | tableschema_sql/mapper.py | Mapper.restore_bucket | def restore_bucket(self, table_name):
"""Restore bucket from SQL
"""
if table_name.startswith(self.__prefix):
return table_name.replace(self.__prefix, '', 1)
return None | python | def restore_bucket(self, table_name):
"""Restore bucket from SQL
"""
if table_name.startswith(self.__prefix):
return table_name.replace(self.__prefix, '', 1)
return None | [
"def",
"restore_bucket",
"(",
"self",
",",
"table_name",
")",
":",
"if",
"table_name",
".",
"startswith",
"(",
"self",
".",
"__prefix",
")",
":",
"return",
"table_name",
".",
"replace",
"(",
"self",
".",
"__prefix",
",",
"''",
",",
"1",
")",
"return",
"None"
] | Restore bucket from SQL | [
"Restore",
"bucket",
"from",
"SQL"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/mapper.py#L159-L164 |
frictionlessdata/tableschema-sql-py | tableschema_sql/mapper.py | Mapper.restore_descriptor | def restore_descriptor(self, table_name, columns, constraints, autoincrement_column=None):
"""Restore descriptor from SQL
"""
# Fields
fields = []
for column in columns:
if column.name == autoincrement_column:
continue
field_type = self.restore_type(column.type)
field = {'name': column.name, 'type': field_type}
if not column.nullable:
field['constraints'] = {'required': True}
fields.append(field)
# Primary key
pk = []
for constraint in constraints:
if isinstance(constraint, sa.PrimaryKeyConstraint):
for column in constraint.columns:
if column.name == autoincrement_column:
continue
pk.append(column.name)
# Foreign keys
fks = []
if self.__dialect == 'postgresql':
for constraint in constraints:
if isinstance(constraint, sa.ForeignKeyConstraint):
resource = ''
own_fields = []
foreign_fields = []
for element in constraint.elements:
own_fields.append(element.parent.name)
if element.column.table.name != table_name:
resource = self.restore_bucket(element.column.table.name)
foreign_fields.append(element.column.name)
if len(own_fields) == len(foreign_fields) == 1:
own_fields = own_fields.pop()
foreign_fields = foreign_fields.pop()
fks.append({
'fields': own_fields,
'reference': {'resource': resource, 'fields': foreign_fields},
})
        # Descriptor
descriptor = {}
descriptor['fields'] = fields
if len(pk) > 0:
if len(pk) == 1:
pk = pk.pop()
descriptor['primaryKey'] = pk
if len(fks) > 0:
descriptor['foreignKeys'] = fks
return descriptor | python | def restore_descriptor(self, table_name, columns, constraints, autoincrement_column=None):
"""Restore descriptor from SQL
"""
# Fields
fields = []
for column in columns:
if column.name == autoincrement_column:
continue
field_type = self.restore_type(column.type)
field = {'name': column.name, 'type': field_type}
if not column.nullable:
field['constraints'] = {'required': True}
fields.append(field)
# Primary key
pk = []
for constraint in constraints:
if isinstance(constraint, sa.PrimaryKeyConstraint):
for column in constraint.columns:
if column.name == autoincrement_column:
continue
pk.append(column.name)
# Foreign keys
fks = []
if self.__dialect == 'postgresql':
for constraint in constraints:
if isinstance(constraint, sa.ForeignKeyConstraint):
resource = ''
own_fields = []
foreign_fields = []
for element in constraint.elements:
own_fields.append(element.parent.name)
if element.column.table.name != table_name:
resource = self.restore_bucket(element.column.table.name)
foreign_fields.append(element.column.name)
if len(own_fields) == len(foreign_fields) == 1:
own_fields = own_fields.pop()
foreign_fields = foreign_fields.pop()
fks.append({
'fields': own_fields,
'reference': {'resource': resource, 'fields': foreign_fields},
})
        # Descriptor
descriptor = {}
descriptor['fields'] = fields
if len(pk) > 0:
if len(pk) == 1:
pk = pk.pop()
descriptor['primaryKey'] = pk
if len(fks) > 0:
descriptor['foreignKeys'] = fks
return descriptor | [
"def",
"restore_descriptor",
"(",
"self",
",",
"table_name",
",",
"columns",
",",
"constraints",
",",
"autoincrement_column",
"=",
"None",
")",
":",
"# Fields",
"fields",
"=",
"[",
"]",
"for",
"column",
"in",
"columns",
":",
"if",
"column",
".",
"name",
"==",
"autoincrement_column",
":",
"continue",
"field_type",
"=",
"self",
".",
"restore_type",
"(",
"column",
".",
"type",
")",
"field",
"=",
"{",
"'name'",
":",
"column",
".",
"name",
",",
"'type'",
":",
"field_type",
"}",
"if",
"not",
"column",
".",
"nullable",
":",
"field",
"[",
"'constraints'",
"]",
"=",
"{",
"'required'",
":",
"True",
"}",
"fields",
".",
"append",
"(",
"field",
")",
"# Primary key",
"pk",
"=",
"[",
"]",
"for",
"constraint",
"in",
"constraints",
":",
"if",
"isinstance",
"(",
"constraint",
",",
"sa",
".",
"PrimaryKeyConstraint",
")",
":",
"for",
"column",
"in",
"constraint",
".",
"columns",
":",
"if",
"column",
".",
"name",
"==",
"autoincrement_column",
":",
"continue",
"pk",
".",
"append",
"(",
"column",
".",
"name",
")",
"# Foreign keys",
"fks",
"=",
"[",
"]",
"if",
"self",
".",
"__dialect",
"==",
"'postgresql'",
":",
"for",
"constraint",
"in",
"constraints",
":",
"if",
"isinstance",
"(",
"constraint",
",",
"sa",
".",
"ForeignKeyConstraint",
")",
":",
"resource",
"=",
"''",
"own_fields",
"=",
"[",
"]",
"foreign_fields",
"=",
"[",
"]",
"for",
"element",
"in",
"constraint",
".",
"elements",
":",
"own_fields",
".",
"append",
"(",
"element",
".",
"parent",
".",
"name",
")",
"if",
"element",
".",
"column",
".",
"table",
".",
"name",
"!=",
"table_name",
":",
"resource",
"=",
"self",
".",
"restore_bucket",
"(",
"element",
".",
"column",
".",
"table",
".",
"name",
")",
"foreign_fields",
".",
"append",
"(",
"element",
".",
"column",
".",
"name",
")",
"if",
"len",
"(",
"own_fields",
")",
"==",
"len",
"(",
"foreign_fields",
")",
"==",
"1",
":",
"own_fields",
"=",
"own_fields",
".",
"pop",
"(",
")",
"foreign_fields",
"=",
"foreign_fields",
".",
"pop",
"(",
")",
"fks",
".",
"append",
"(",
"{",
"'fields'",
":",
"own_fields",
",",
"'reference'",
":",
"{",
"'resource'",
":",
"resource",
",",
"'fields'",
":",
"foreign_fields",
"}",
",",
"}",
")",
"# Desscriptor",
"descriptor",
"=",
"{",
"}",
"descriptor",
"[",
"'fields'",
"]",
"=",
"fields",
"if",
"len",
"(",
"pk",
")",
">",
"0",
":",
"if",
"len",
"(",
"pk",
")",
"==",
"1",
":",
"pk",
"=",
"pk",
".",
"pop",
"(",
")",
"descriptor",
"[",
"'primaryKey'",
"]",
"=",
"pk",
"if",
"len",
"(",
"fks",
")",
">",
"0",
":",
"descriptor",
"[",
"'foreignKeys'",
"]",
"=",
"fks",
"return",
"descriptor"
] | Restore descriptor from SQL | [
"Restore",
"descriptor",
"from",
"SQL"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/mapper.py#L166-L221 |
frictionlessdata/tableschema-sql-py | tableschema_sql/mapper.py | Mapper.restore_row | def restore_row(self, row, schema):
"""Restore row from SQL
"""
row = list(row)
for index, field in enumerate(schema.fields):
if self.__dialect == 'postgresql':
if field.type in ['array', 'object']:
continue
row[index] = field.cast_value(row[index])
return row | python | def restore_row(self, row, schema):
"""Restore row from SQL
"""
row = list(row)
for index, field in enumerate(schema.fields):
if self.__dialect == 'postgresql':
if field.type in ['array', 'object']:
continue
row[index] = field.cast_value(row[index])
return row | [
"def",
"restore_row",
"(",
"self",
",",
"row",
",",
"schema",
")",
":",
"row",
"=",
"list",
"(",
"row",
")",
"for",
"index",
",",
"field",
"in",
"enumerate",
"(",
"schema",
".",
"fields",
")",
":",
"if",
"self",
".",
"__dialect",
"==",
"'postgresql'",
":",
"if",
"field",
".",
"type",
"in",
"[",
"'array'",
",",
"'object'",
"]",
":",
"continue",
"row",
"[",
"index",
"]",
"=",
"field",
".",
"cast_value",
"(",
"row",
"[",
"index",
"]",
")",
"return",
"row"
] | Restore row from SQL | [
"Restore",
"row",
"from",
"SQL"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/mapper.py#L223-L232 |
frictionlessdata/tableschema-sql-py | tableschema_sql/mapper.py | Mapper.restore_type | def restore_type(self, type):
"""Restore type from SQL
"""
# All dialects
mapping = {
ARRAY: 'array',
sa.Boolean: 'boolean',
sa.Date: 'date',
sa.DateTime: 'datetime',
sa.Float: 'number',
sa.Integer: 'integer',
JSONB: 'object',
JSON: 'object',
sa.Numeric: 'number',
sa.Text: 'string',
sa.Time: 'time',
sa.VARCHAR: 'string',
UUID: 'string',
}
# Get field type
field_type = None
for key, value in mapping.items():
if isinstance(type, key):
field_type = value
# Not supported
if field_type is None:
message = 'Type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return field_type | python | def restore_type(self, type):
"""Restore type from SQL
"""
# All dialects
mapping = {
ARRAY: 'array',
sa.Boolean: 'boolean',
sa.Date: 'date',
sa.DateTime: 'datetime',
sa.Float: 'number',
sa.Integer: 'integer',
JSONB: 'object',
JSON: 'object',
sa.Numeric: 'number',
sa.Text: 'string',
sa.Time: 'time',
sa.VARCHAR: 'string',
UUID: 'string',
}
# Get field type
field_type = None
for key, value in mapping.items():
if isinstance(type, key):
field_type = value
# Not supported
if field_type is None:
message = 'Type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return field_type | [
"def",
"restore_type",
"(",
"self",
",",
"type",
")",
":",
"# All dialects",
"mapping",
"=",
"{",
"ARRAY",
":",
"'array'",
",",
"sa",
".",
"Boolean",
":",
"'boolean'",
",",
"sa",
".",
"Date",
":",
"'date'",
",",
"sa",
".",
"DateTime",
":",
"'datetime'",
",",
"sa",
".",
"Float",
":",
"'number'",
",",
"sa",
".",
"Integer",
":",
"'integer'",
",",
"JSONB",
":",
"'object'",
",",
"JSON",
":",
"'object'",
",",
"sa",
".",
"Numeric",
":",
"'number'",
",",
"sa",
".",
"Text",
":",
"'string'",
",",
"sa",
".",
"Time",
":",
"'time'",
",",
"sa",
".",
"VARCHAR",
":",
"'string'",
",",
"UUID",
":",
"'string'",
",",
"}",
"# Get field type",
"field_type",
"=",
"None",
"for",
"key",
",",
"value",
"in",
"mapping",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"type",
",",
"key",
")",
":",
"field_type",
"=",
"value",
"# Not supported",
"if",
"field_type",
"is",
"None",
":",
"message",
"=",
"'Type \"%s\" is not supported'",
"raise",
"tableschema",
".",
"exceptions",
".",
"StorageError",
"(",
"message",
"%",
"type",
")",
"return",
"field_type"
] | Restore type from SQL | [
"Restore",
"type",
"from",
"SQL"
] | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/mapper.py#L234-L266 |
varunsrin/one-py | onepy/onmanager.py | ONProcess.open_hierarchy | def open_hierarchy(self, path, relative_to_object_id, object_id, create_file_type=0):
"""
CreateFileType
0 - Creates no new object.
1 - Creates a notebook with the specified name at the specified location.
2 - Creates a section group with the specified name at the specified location.
3 - Creates a section with the specified name at the specified location.
"""
try:
return(self.process.OpenHierarchy(path, relative_to_object_id, "", create_file_type))
except Exception as e:
print(e)
print("Could not Open Hierarchy") | python | def open_hierarchy(self, path, relative_to_object_id, object_id, create_file_type=0):
"""
CreateFileType
0 - Creates no new object.
1 - Creates a notebook with the specified name at the specified location.
2 - Creates a section group with the specified name at the specified location.
3 - Creates a section with the specified name at the specified location.
"""
try:
return(self.process.OpenHierarchy(path, relative_to_object_id, "", create_file_type))
except Exception as e:
print(e)
print("Could not Open Hierarchy") | [
"def",
"open_hierarchy",
"(",
"self",
",",
"path",
",",
"relative_to_object_id",
",",
"object_id",
",",
"create_file_type",
"=",
"0",
")",
":",
"try",
":",
"return",
"(",
"self",
".",
"process",
".",
"OpenHierarchy",
"(",
"path",
",",
"relative_to_object_id",
",",
"\"\"",
",",
"create_file_type",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"print",
"(",
"\"Could not Open Hierarchy\"",
")"
] | CreateFileType
0 - Creates no new object.
1 - Creates a notebook with the specified name at the specified location.
2 - Creates a section group with the specified name at the specified location.
3 - Creates a section with the specified name at the specified location. | [
"CreateFileType",
"0",
"-",
"Creates",
"no",
"new",
"object",
".",
"1",
"-",
"Creates",
"a",
"notebook",
"with",
"the",
"specified",
"name",
"at",
"the",
"specified",
"location",
".",
"2",
"-",
"Creates",
"a",
"section",
"group",
"with",
"the",
"specified",
"name",
"at",
"the",
"specified",
"location",
".",
"3",
"-",
"Creates",
"a",
"section",
"with",
"the",
"specified",
"name",
"at",
"the",
"specified",
"location",
"."
] | train | https://github.com/varunsrin/one-py/blob/8fcf021bcf776a1802a69f50dfd180daf83536ff/onepy/onmanager.py#L53-L65 |
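A hypothetical usage sketch for the OneNote COM wrapper above (Windows only, OneNote installed). The notebook path is made up, and it is assumed that `ONProcess` can be constructed with its defaults and attaches to the `OneNote.Application` COM object:

```python
from onepy.onmanager import ONProcess

on = ONProcess()  # assumption: default construction is enough to attach to OneNote
# create_file_type=1 asks OneNote to create a notebook at the path if it is missing
notebook_id = on.open_hierarchy(r'C:\Notes\Demo', '', '', create_file_type=1)
print(notebook_id)
```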
varunsrin/one-py | onepy/onmanager.py | ONProcess.create_new_page | def create_new_page (self, section_id, new_page_style=0):
"""
NewPageStyle
0 - Create a Page that has Default Page Style
1 - Create a blank page with no title
        2 - Create a blank page that has no title
"""
try:
self.process.CreateNewPage(section_id, "", new_page_style)
except Exception as e:
print(e)
print("Unable to create the page") | python | def create_new_page (self, section_id, new_page_style=0):
"""
NewPageStyle
0 - Create a Page that has Default Page Style
1 - Create a blank page with no title
        2 - Create a blank page that has no title
"""
try:
self.process.CreateNewPage(section_id, "", new_page_style)
except Exception as e:
print(e)
print("Unable to create the page") | [
"def",
"create_new_page",
"(",
"self",
",",
"section_id",
",",
"new_page_style",
"=",
"0",
")",
":",
"try",
":",
"self",
".",
"process",
".",
"CreateNewPage",
"(",
"section_id",
",",
"\"\"",
",",
"new_page_style",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"print",
"(",
"\"Unable to create the page\"",
")"
] | NewPageStyle
0 - Create a Page that has Default Page Style
1 - Create a blank page with no title
        2 - Create a blank page that has no title | [
"NewPageStyle",
"0",
"-",
"Create",
"a",
"Page",
"that",
"has",
"Default",
"Page",
"Style",
"1",
"-",
"Create",
"a",
"blank",
"page",
"with",
"no",
"title",
"2",
"-",
"Createa",
"blank",
"page",
"that",
"has",
"no",
"title"
] | train | https://github.com/varunsrin/one-py/blob/8fcf021bcf776a1802a69f50dfd180daf83536ff/onepy/onmanager.py#L75-L86 |
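Continuing the hypothetical wrapper above, a blank, title-less page can be added to a section (the section id here is only a placeholder):

```python
from onepy.onmanager import ONProcess

on = ONProcess()
on.create_new_page('{some-section-id}', new_page_style=2)  # 2 = blank page, no title
```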
varunsrin/one-py | onepy/onmanager.py | ONProcess.get_page_content | def get_page_content(self, page_id, page_info=0):
"""
PageInfo
0 - Returns only basic page content, without selection markup and binary data objects. This is the standard value to pass.
1 - Returns page content with no selection markup, but with all binary data.
2 - Returns page content with selection markup, but no binary data.
3 - Returns page content with selection markup and all binary data.
"""
try:
return(self.process.GetPageContent(page_id, "", page_info))
except Exception as e:
print(e)
print("Could not get Page Content") | python | def get_page_content(self, page_id, page_info=0):
"""
PageInfo
0 - Returns only basic page content, without selection markup and binary data objects. This is the standard value to pass.
1 - Returns page content with no selection markup, but with all binary data.
2 - Returns page content with selection markup, but no binary data.
3 - Returns page content with selection markup and all binary data.
"""
try:
return(self.process.GetPageContent(page_id, "", page_info))
except Exception as e:
print(e)
print("Could not get Page Content") | [
"def",
"get_page_content",
"(",
"self",
",",
"page_id",
",",
"page_info",
"=",
"0",
")",
":",
"try",
":",
"return",
"(",
"self",
".",
"process",
".",
"GetPageContent",
"(",
"page_id",
",",
"\"\"",
",",
"page_info",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"print",
"(",
"\"Could not get Page Content\"",
")"
] | PageInfo
0 - Returns only basic page content, without selection markup and binary data objects. This is the standard value to pass.
1 - Returns page content with no selection markup, but with all binary data.
2 - Returns page content with selection markup, but no binary data.
3 - Returns page content with selection markup and all binary data. | [
"PageInfo",
"0",
"-",
"Returns",
"only",
"basic",
"page",
"content",
"without",
"selection",
"markup",
"and",
"binary",
"data",
"objects",
".",
"This",
"is",
"the",
"standard",
"value",
"to",
"pass",
".",
"1",
"-",
"Returns",
"page",
"content",
"with",
"no",
"selection",
"markup",
"but",
"with",
"all",
"binary",
"data",
".",
"2",
"-",
"Returns",
"page",
"content",
"with",
"selection",
"markup",
"but",
"no",
"binary",
"data",
".",
"3",
"-",
"Returns",
"page",
"content",
"with",
"selection",
"markup",
"and",
"all",
"binary",
"data",
"."
] | train | https://github.com/varunsrin/one-py/blob/8fcf021bcf776a1802a69f50dfd180daf83536ff/onepy/onmanager.py#L95-L107 |
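A sketch of fetching and parsing a page with the method above. The page id is a placeholder that would normally come from the hierarchy XML, and OneNote XML-namespace handling is omitted.

    import xml.etree.ElementTree as ET

    on = ONProcess()
    page_id = "{PAGE-ID-PLACEHOLDER}"                     # hypothetical id
    xml_text = on.get_page_content(page_id, page_info=0)  # 0 = basic content, no binary data
    root = ET.fromstring(xml_text)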
varunsrin/one-py | onepy/onmanager.py | ONProcess.publish | def publish(self, hierarchy_id, target_file_path, publish_format, clsid_of_exporter=""):
"""
PublishFormat
0 - Published page is in .one format.
1 - Published page is in .onea format.
2 - Published page is in .mht format.
3 - Published page is in .pdf format.
4 - Published page is in .xps format.
5 - Published page is in .doc or .docx format.
6 - Published page is in enhanced metafile (.emf) format.
"""
try:
self.process.Publish(hierarchy_id, target_file_path, publish_format, clsid_of_exporter)
except Exception as e:
print(e)
print("Could not Publish") | python | def publish(self, hierarchy_id, target_file_path, publish_format, clsid_of_exporter=""):
"""
PublishFormat
0 - Published page is in .one format.
1 - Published page is in .onea format.
2 - Published page is in .mht format.
3 - Published page is in .pdf format.
4 - Published page is in .xps format.
5 - Published page is in .doc or .docx format.
6 - Published page is in enhanced metafile (.emf) format.
"""
try:
self.process.Publish(hierarchy_id, target_file_path, publish_format, clsid_of_exporter)
except Exception as e:
print(e)
print("Could not Publish") | [
"def",
"publish",
"(",
"self",
",",
"hierarchy_id",
",",
"target_file_path",
",",
"publish_format",
",",
"clsid_of_exporter",
"=",
"\"\"",
")",
":",
"try",
":",
"self",
".",
"process",
".",
"Publish",
"(",
"hierarchy_id",
",",
"target_file_path",
",",
"publish_format",
",",
"clsid_of_exporter",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"print",
"(",
"\"Could not Publish\"",
")"
] | PublishFormat
0 - Published page is in .one format.
1 - Published page is in .onea format.
2 - Published page is in .mht format.
3 - Published page is in .pdf format.
4 - Published page is in .xps format.
5 - Published page is in .doc or .docx format.
6 - Published page is in enhanced metafile (.emf) format. | [
"PublishFormat",
"0",
"-",
"Published",
"page",
"is",
"in",
".",
"one",
"format",
".",
"1",
"-",
"Published",
"page",
"is",
"in",
".",
"onea",
"format",
".",
"2",
"-",
"Published",
"page",
"is",
"in",
".",
"mht",
"format",
".",
"3",
"-",
"Published",
"page",
"is",
"in",
".",
"pdf",
"format",
".",
"4",
"-",
"Published",
"page",
"is",
"in",
".",
"xps",
"format",
".",
"5",
"-",
"Published",
"page",
"is",
"in",
".",
"doc",
"or",
".",
"docx",
"format",
".",
"6",
"-",
"Published",
"page",
"is",
"in",
"enhanced",
"metafile",
"(",
".",
"emf",
")",
"format",
"."
] | train | https://github.com/varunsrin/one-py/blob/8fcf021bcf776a1802a69f50dfd180daf83536ff/onepy/onmanager.py#L141-L156 |
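A sketch of exporting a single page to PDF via the method above; the page id and output path are placeholders.

    on = ONProcess()
    page_id = "{PAGE-ID-PLACEHOLDER}"
    on.publish(page_id, r"C:\Exports\meeting_notes.pdf", publish_format=3)   # 3 = .pdf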
varunsrin/one-py | onepy/onmanager.py | ONProcess.get_special_location | def get_special_location(self, special_location=0):
"""
SpecialLocation
0 - Gets the path to the Backup Folders folder location.
1 - Gets the path to the Unfiled Notes folder location.
2 - Gets the path to the Default Notebook folder location.
"""
try:
return(self.process.GetSpecialLocation(special_location))
except Exception as e:
print(e)
print("Could not retreive special location") | python | def get_special_location(self, special_location=0):
"""
SpecialLocation
0 - Gets the path to the Backup Folders folder location.
1 - Gets the path to the Unfiled Notes folder location.
2 - Gets the path to the Default Notebook folder location.
"""
try:
return(self.process.GetSpecialLocation(special_location))
except Exception as e:
print(e)
print("Could not retreive special location") | [
"def",
"get_special_location",
"(",
"self",
",",
"special_location",
"=",
"0",
")",
":",
"try",
":",
"return",
"(",
"self",
".",
"process",
".",
"GetSpecialLocation",
"(",
"special_location",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"print",
"(",
"\"Could not retreive special location\"",
")"
] | SpecialLocation
0 - Gets the path to the Backup Folders folder location.
1 - Gets the path to the Unfiled Notes folder location.
2 - Gets the path to the Default Notebook folder location. | [
"SpecialLocation",
"0",
"-",
"Gets",
"the",
"path",
"to",
"the",
"Backup",
"Folders",
"folder",
"location",
".",
"1",
"-",
"Gets",
"the",
"path",
"to",
"the",
"Unfiled",
"Notes",
"folder",
"location",
".",
"2",
"-",
"Gets",
"the",
"path",
"to",
"the",
"Default",
"Notebook",
"folder",
"location",
"."
] | train | https://github.com/varunsrin/one-py/blob/8fcf021bcf776a1802a69f50dfd180daf83536ff/onepy/onmanager.py#L179-L190 |
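A small sketch that prints the three special locations the wrapper exposes (ONProcess construction assumed as before).

    on = ONProcess()
    for code, label in [(0, "Backup Folders"), (1, "Unfiled Notes"), (2, "Default Notebook")]:
        print(label, "->", on.get_special_location(code))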
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | memory | def memory():
"""Determine memory specifications of the machine.
Returns
-------
mem_info : dictionary
Holds the current values for the total, free and used memory of the system.
"""
mem_info = dict()
for k, v in psutil.virtual_memory()._asdict().items():
mem_info[k] = int(v)
return mem_info | python | def memory():
"""Determine memory specifications of the machine.
Returns
-------
mem_info : dictionary
Holds the current values for the total, free and used memory of the system.
"""
mem_info = dict()
for k, v in psutil.virtual_memory()._asdict().items():
mem_info[k] = int(v)
return mem_info | [
"def",
"memory",
"(",
")",
":",
"mem_info",
"=",
"dict",
"(",
")",
"for",
"k",
",",
"v",
"in",
"psutil",
".",
"virtual_memory",
"(",
")",
".",
"_asdict",
"(",
")",
".",
"items",
"(",
")",
":",
"mem_info",
"[",
"k",
"]",
"=",
"int",
"(",
"v",
")",
"return",
"mem_info"
] | Determine memory specifications of the machine.
Returns
-------
mem_info : dictionary
Holds the current values for the total, free and used memory of the system. | [
"Determine",
"memory",
"specifications",
"of",
"the",
"machine",
"."
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L69-L83 |
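A standalone equivalent of the helper above, handy for inspecting what the dictionary holds. psutil reports sizes in bytes, and its float 'percent' field is truncated by int(), exactly as in memory().

    import psutil

    mem_info = {k: int(v) for k, v in psutil.virtual_memory()._asdict().items()}
    print(mem_info["total"], mem_info["available"], mem_info["free"])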
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | get_chunk_size | def get_chunk_size(N, n):
"""Given a two-dimensional array with a dimension of size 'N',
determine the number of rows or columns that can fit into memory.
Parameters
----------
N : int
The size of one of the dimensions of a two-dimensional array.
n : int
The number of arrays of size 'N' times 'chunk_size' that can fit in memory.
Returns
-------
chunk_size : int
The size of the dimension orthogonal to the one of size 'N'.
"""
mem_free = memory()['free']
if mem_free > 60000000:
chunk_size = int(((mem_free - 10000000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 40000000:
chunk_size = int(((mem_free - 7000000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 14000000:
chunk_size = int(((mem_free - 2000000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 8000000:
chunk_size = int(((mem_free - 1400000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 2000000:
chunk_size = int(((mem_free - 900000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 1000000:
chunk_size = int(((mem_free - 400000) * 1000) / (4 * n * N))
return chunk_size
else:
print("\nERROR: Cluster_Ensembles: get_chunk_size: "
"this machine does not have enough free memory resources "
"to perform ensemble clustering.\n")
sys.exit(1) | python | def get_chunk_size(N, n):
"""Given a two-dimensional array with a dimension of size 'N',
determine the number of rows or columns that can fit into memory.
Parameters
----------
N : int
The size of one of the dimensions of a two-dimensional array.
n : int
The number of arrays of size 'N' times 'chunk_size' that can fit in memory.
Returns
-------
chunk_size : int
The size of the dimension orthogonal to the one of size 'N'.
"""
mem_free = memory()['free']
if mem_free > 60000000:
chunk_size = int(((mem_free - 10000000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 40000000:
chunk_size = int(((mem_free - 7000000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 14000000:
chunk_size = int(((mem_free - 2000000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 8000000:
chunk_size = int(((mem_free - 1400000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 2000000:
chunk_size = int(((mem_free - 900000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 1000000:
chunk_size = int(((mem_free - 400000) * 1000) / (4 * n * N))
return chunk_size
else:
print("\nERROR: Cluster_Ensembles: get_chunk_size: "
"this machine does not have enough free memory resources "
"to perform ensemble clustering.\n")
sys.exit(1) | [
"def",
"get_chunk_size",
"(",
"N",
",",
"n",
")",
":",
"mem_free",
"=",
"memory",
"(",
")",
"[",
"'free'",
"]",
"if",
"mem_free",
">",
"60000000",
":",
"chunk_size",
"=",
"int",
"(",
"(",
"(",
"mem_free",
"-",
"10000000",
")",
"*",
"1000",
")",
"/",
"(",
"4",
"*",
"n",
"*",
"N",
")",
")",
"return",
"chunk_size",
"elif",
"mem_free",
">",
"40000000",
":",
"chunk_size",
"=",
"int",
"(",
"(",
"(",
"mem_free",
"-",
"7000000",
")",
"*",
"1000",
")",
"/",
"(",
"4",
"*",
"n",
"*",
"N",
")",
")",
"return",
"chunk_size",
"elif",
"mem_free",
">",
"14000000",
":",
"chunk_size",
"=",
"int",
"(",
"(",
"(",
"mem_free",
"-",
"2000000",
")",
"*",
"1000",
")",
"/",
"(",
"4",
"*",
"n",
"*",
"N",
")",
")",
"return",
"chunk_size",
"elif",
"mem_free",
">",
"8000000",
":",
"chunk_size",
"=",
"int",
"(",
"(",
"(",
"mem_free",
"-",
"1400000",
")",
"*",
"1000",
")",
"/",
"(",
"4",
"*",
"n",
"*",
"N",
")",
")",
"return",
"chunk_size",
"elif",
"mem_free",
">",
"2000000",
":",
"chunk_size",
"=",
"int",
"(",
"(",
"(",
"mem_free",
"-",
"900000",
")",
"*",
"1000",
")",
"/",
"(",
"4",
"*",
"n",
"*",
"N",
")",
")",
"return",
"chunk_size",
"elif",
"mem_free",
">",
"1000000",
":",
"chunk_size",
"=",
"int",
"(",
"(",
"(",
"mem_free",
"-",
"400000",
")",
"*",
"1000",
")",
"/",
"(",
"4",
"*",
"n",
"*",
"N",
")",
")",
"return",
"chunk_size",
"else",
":",
"print",
"(",
"\"\\nERROR: Cluster_Ensembles: get_chunk_size: \"",
"\"this machine does not have enough free memory resources \"",
"\"to perform ensemble clustering.\\n\"",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | Given a two-dimensional array with a dimension of size 'N',
determine the number of rows or columns that can fit into memory.
Parameters
----------
N : int
The size of one of the dimensions of a two-dimensional array.
n : int
The number of arrays of size 'N' times 'chunk_size' that can fit in memory.
Returns
-------
chunk_size : int
The size of the dimension orthogonal to the one of size 'N'. | [
"Given",
"a",
"two",
"-",
"dimensional",
"array",
"with",
"a",
"dimension",
"of",
"size",
"N",
"determine",
"the",
"number",
"of",
"rows",
"or",
"columns",
"that",
"can",
"fit",
"into",
"memory",
"."
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L86-L127 |
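A sketch of the intended use: walk a large N x N float array one slab of rows at a time, with the slab height chosen by get_chunk_size. The sizes are illustrative; n is the number of such slabs that must coexist in memory.

    import numpy as np

    N, n = 20000, 2
    chunk_size = get_chunk_size(N, n)
    for start in range(0, N, chunk_size):
        stop = min(start + chunk_size, N)
        block = np.zeros((stop - start, N), dtype=np.float32)
        # ... fill and reduce 'block' here, one slab at a time ...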
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | get_compression_filter | def get_compression_filter(byte_counts):
"""Determine whether or not to use a compression on the array stored in
a hierarchical data format, and which compression library to use to that purpose.
Compression reduces the HDF5 file size and also helps improving I/O efficiency
for large datasets.
Parameters
----------
byte_counts : int
Returns
-------
FILTERS : instance of the tables.Filters class
"""
assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0
if 2 * byte_counts > 1000 * memory()['free']:
try:
FILTERS = tables.filters(complevel = 5, complib = 'blosc',
shuffle = True, least_significant_digit = 6)
except tables.FiltersWarning:
FILTERS = tables.filters(complevel = 5, complib = 'lzo',
shuffle = True, least_significant_digit = 6)
else:
FILTERS = None
return FILTERS | python | def get_compression_filter(byte_counts):
"""Determine whether or not to use a compression on the array stored in
a hierarchical data format, and which compression library to use to that purpose.
Compression reduces the HDF5 file size and also helps improving I/O efficiency
for large datasets.
Parameters
----------
byte_counts : int
Returns
-------
FILTERS : instance of the tables.Filters class
"""
assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0
if 2 * byte_counts > 1000 * memory()['free']:
try:
FILTERS = tables.filters(complevel = 5, complib = 'blosc',
shuffle = True, least_significant_digit = 6)
except tables.FiltersWarning:
FILTERS = tables.filters(complevel = 5, complib = 'lzo',
shuffle = True, least_significant_digit = 6)
else:
FILTERS = None
return FILTERS | [
"def",
"get_compression_filter",
"(",
"byte_counts",
")",
":",
"assert",
"isinstance",
"(",
"byte_counts",
",",
"numbers",
".",
"Integral",
")",
"and",
"byte_counts",
">",
"0",
"if",
"2",
"*",
"byte_counts",
">",
"1000",
"*",
"memory",
"(",
")",
"[",
"'free'",
"]",
":",
"try",
":",
"FILTERS",
"=",
"tables",
".",
"filters",
"(",
"complevel",
"=",
"5",
",",
"complib",
"=",
"'blosc'",
",",
"shuffle",
"=",
"True",
",",
"least_significant_digit",
"=",
"6",
")",
"except",
"tables",
".",
"FiltersWarning",
":",
"FILTERS",
"=",
"tables",
".",
"filters",
"(",
"complevel",
"=",
"5",
",",
"complib",
"=",
"'lzo'",
",",
"shuffle",
"=",
"True",
",",
"least_significant_digit",
"=",
"6",
")",
"else",
":",
"FILTERS",
"=",
"None",
"return",
"FILTERS"
] | Determine whether or not to use a compression on the array stored in
a hierarchical data format, and which compression library to use to that purpose.
Compression reduces the HDF5 file size and also helps improving I/O efficiency
for large datasets.
Parameters
----------
byte_counts : int
Returns
-------
FILTERS : instance of the tables.Filters class | [
"Determine",
"whether",
"or",
"not",
"to",
"use",
"a",
"compression",
"on",
"the",
"array",
"stored",
"in",
"a",
"hierarchical",
"data",
"format",
"and",
"which",
"compression",
"library",
"to",
"use",
"to",
"that",
"purpose",
".",
"Compression",
"reduces",
"the",
"HDF5",
"file",
"size",
"and",
"also",
"helps",
"improving",
"I",
"/",
"O",
"efficiency",
"for",
"large",
"datasets",
".",
"Parameters",
"----------",
"byte_counts",
":",
"int",
"Returns",
"-------",
"FILTERS",
":",
"instance",
"of",
"the",
"tables",
".",
"Filters",
"class"
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L130-L157 |
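A sketch of attaching such a filter to a chunked array stored with PyTables. Note that the filter factory in PyTables is the tables.Filters class (capital F); the lowercase tables.filters referenced above is the module that contains it.

    import numpy as np
    import tables

    data = np.random.rand(500, 500)
    FILTERS = tables.Filters(complevel=5, complib='blosc', shuffle=True,
                             least_significant_digit=6)
    with tables.open_file('compressed_example.h5', 'w') as fileh:
        atom = tables.Atom.from_dtype(data.dtype)
        ds = fileh.create_carray(fileh.root, 'payload', atom, data.shape, filters=FILTERS)
        ds[:] = data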
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | build_hypergraph_adjacency | def build_hypergraph_adjacency(cluster_runs):
"""Return the adjacency matrix to a hypergraph, in sparse matrix representation.
Parameters
----------
cluster_runs : array of shape (n_partitions, n_samples)
Returns
-------
hypergraph_adjacency : compressed sparse row matrix
Represents the hypergraph associated with an ensemble of partitions,
each partition corresponding to a row of the array 'cluster_runs'
provided at input.
"""
N_runs = cluster_runs.shape[0]
hypergraph_adjacency = create_membership_matrix(cluster_runs[0])
for i in range(1, N_runs):
hypergraph_adjacency = scipy.sparse.vstack([hypergraph_adjacency,
create_membership_matrix(cluster_runs[i])],
format = 'csr')
return hypergraph_adjacency | python | def build_hypergraph_adjacency(cluster_runs):
"""Return the adjacency matrix to a hypergraph, in sparse matrix representation.
Parameters
----------
cluster_runs : array of shape (n_partitions, n_samples)
Returns
-------
hypergraph_adjacency : compressed sparse row matrix
Represents the hypergraph associated with an ensemble of partitions,
each partition corresponding to a row of the array 'cluster_runs'
provided at input.
"""
N_runs = cluster_runs.shape[0]
hypergraph_adjacency = create_membership_matrix(cluster_runs[0])
for i in range(1, N_runs):
hypergraph_adjacency = scipy.sparse.vstack([hypergraph_adjacency,
create_membership_matrix(cluster_runs[i])],
format = 'csr')
return hypergraph_adjacency | [
"def",
"build_hypergraph_adjacency",
"(",
"cluster_runs",
")",
":",
"N_runs",
"=",
"cluster_runs",
".",
"shape",
"[",
"0",
"]",
"hypergraph_adjacency",
"=",
"create_membership_matrix",
"(",
"cluster_runs",
"[",
"0",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"N_runs",
")",
":",
"hypergraph_adjacency",
"=",
"scipy",
".",
"sparse",
".",
"vstack",
"(",
"[",
"hypergraph_adjacency",
",",
"create_membership_matrix",
"(",
"cluster_runs",
"[",
"i",
"]",
")",
"]",
",",
"format",
"=",
"'csr'",
")",
"return",
"hypergraph_adjacency"
] | Return the adjacency matrix to a hypergraph, in sparse matrix representation.
Parameters
----------
cluster_runs : array of shape (n_partitions, n_samples)
Returns
-------
hypergraph_adjacency : compressed sparse row matrix
Represents the hypergraph associated with an ensemble of partitions,
each partition corresponding to a row of the array 'cluster_runs'
provided at input. | [
"Return",
"the",
"adjacency",
"matrix",
"to",
"a",
"hypergraph",
"in",
"sparse",
"matrix",
"representation",
".",
"Parameters",
"----------",
"cluster_runs",
":",
"array",
"of",
"shape",
"(",
"n_partitions",
"n_samples",
")",
"Returns",
"-------",
"hypergraph_adjacency",
":",
"compressed",
"sparse",
"row",
"matrix",
"Represents",
"the",
"hypergraph",
"associated",
"with",
"an",
"ensemble",
"of",
"partitions",
"each",
"partition",
"corresponding",
"to",
"a",
"row",
"of",
"the",
"array",
"cluster_runs",
"provided",
"at",
"input",
"."
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L160-L183 |
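A toy illustration of the resulting hypergraph adjacency: one binary indicator row per cluster of each partition, which is what create_membership_matrix (defined elsewhere in this module) is assumed to produce given how it is stacked here.

    import numpy as np
    import scipy.sparse

    cluster_runs = np.array([[0, 0, 1, 1],
                             [0, 1, 1, 1]])
    blocks = []
    for run in cluster_runs:
        labels = np.unique(run)
        blocks.append(scipy.sparse.csr_matrix((run[None, :] == labels[:, None]).astype(int)))
    H = scipy.sparse.vstack(blocks, format='csr')
    print(H.toarray())
    # [[1 1 0 0]
    #  [0 0 1 1]
    #  [1 0 0 0]
    #  [0 1 1 1]]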
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | store_hypergraph_adjacency | def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
"""Write an hypergraph adjacency to disk to disk in an HDF5 data structure.
Parameters
----------
hypergraph_adjacency : compressed sparse row matrix
hdf5_file_name : file handle or string
"""
assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)
byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes
FILTERS = get_compression_filter(byte_counts)
with tables.open_file(hdf5_file_name, 'r+') as fileh:
for par in ('data', 'indices', 'indptr', 'shape'):
try:
n = getattr(fileh.root.consensus_group, par)
n._f_remove()
except AttributeError:
pass
array = np.array(getattr(hypergraph_adjacency, par))
atom = tables.Atom.from_dtype(array.dtype)
ds = fileh.create_carray(fileh.root.consensus_group, par, atom,
array.shape, filters = FILTERS)
ds[:] = array | python | def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
"""Write an hypergraph adjacency to disk to disk in an HDF5 data structure.
Parameters
----------
hypergraph_adjacency : compressed sparse row matrix
hdf5_file_name : file handle or string
"""
assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)
byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes
FILTERS = get_compression_filter(byte_counts)
with tables.open_file(hdf5_file_name, 'r+') as fileh:
for par in ('data', 'indices', 'indptr', 'shape'):
try:
n = getattr(fileh.root.consensus_group, par)
n._f_remove()
except AttributeError:
pass
array = np.array(getattr(hypergraph_adjacency, par))
atom = tables.Atom.from_dtype(array.dtype)
ds = fileh.create_carray(fileh.root.consensus_group, par, atom,
array.shape, filters = FILTERS)
ds[:] = array | [
"def",
"store_hypergraph_adjacency",
"(",
"hypergraph_adjacency",
",",
"hdf5_file_name",
")",
":",
"assert",
"(",
"hypergraph_adjacency",
".",
"__class__",
"==",
"scipy",
".",
"sparse",
".",
"csr",
".",
"csr_matrix",
")",
"byte_counts",
"=",
"hypergraph_adjacency",
".",
"data",
".",
"nbytes",
"+",
"hypergraph_adjacency",
".",
"indices",
".",
"nbytes",
"+",
"hypergraph_adjacency",
".",
"indptr",
".",
"nbytes",
"FILTERS",
"=",
"get_compression_filter",
"(",
"byte_counts",
")",
"with",
"tables",
".",
"open_file",
"(",
"hdf5_file_name",
",",
"'r+'",
")",
"as",
"fileh",
":",
"for",
"par",
"in",
"(",
"'data'",
",",
"'indices'",
",",
"'indptr'",
",",
"'shape'",
")",
":",
"try",
":",
"n",
"=",
"getattr",
"(",
"fileh",
".",
"root",
".",
"consensus_group",
",",
"par",
")",
"n",
".",
"_f_remove",
"(",
")",
"except",
"AttributeError",
":",
"pass",
"array",
"=",
"np",
".",
"array",
"(",
"getattr",
"(",
"hypergraph_adjacency",
",",
"par",
")",
")",
"atom",
"=",
"tables",
".",
"Atom",
".",
"from_dtype",
"(",
"array",
".",
"dtype",
")",
"ds",
"=",
"fileh",
".",
"create_carray",
"(",
"fileh",
".",
"root",
".",
"consensus_group",
",",
"par",
",",
"atom",
",",
"array",
".",
"shape",
",",
"filters",
"=",
"FILTERS",
")",
"ds",
"[",
":",
"]",
"=",
"array"
] | Write a hypergraph adjacency to disk in an HDF5 data structure.
Parameters
----------
hypergraph_adjacency : compressed sparse row matrix
hdf5_file_name : file handle or string | [
"Write",
"an",
"hypergraph",
"adjacency",
"to",
"disk",
"to",
"disk",
"in",
"an",
"HDF5",
"data",
"structure",
".",
"Parameters",
"----------",
"hypergraph_adjacency",
":",
"compressed",
"sparse",
"row",
"matrix",
"hdf5_file_name",
":",
"file",
"handle",
"or",
"string"
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L186-L215 |
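A sketch of calling it directly. The function opens the HDF5 file in 'r+' mode and writes under /consensus_group, so the file and that group must already exist; this is exactly what cluster_ensembles() sets up before calling it.

    import tables

    hdf5_file_name = 'Cluster_Ensembles.h5'
    with tables.open_file(hdf5_file_name, 'w') as fileh:
        fileh.create_group(fileh.root, 'consensus_group')
    # H: a scipy.sparse CSR matrix, e.g. the toy hypergraph adjacency sketched earlier.
    store_hypergraph_adjacency(H, hdf5_file_name)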
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | load_hypergraph_adjacency | def load_hypergraph_adjacency(hdf5_file_name):
"""
Parameters
----------
hdf5_file_name : file handle or string
Returns
-------
hypergraph_adjacency : compressed sparse row matrix
"""
with tables.open_file(hdf5_file_name, 'r+') as fileh:
pars = []
for par in ('data', 'indices', 'indptr', 'shape'):
pars.append(getattr(fileh.root.consensus_group, par).read())
hypergraph_adjacency = scipy.sparse.csr_matrix(tuple(pars[:3]), shape = pars[3])
return hypergraph_adjacency | python | def load_hypergraph_adjacency(hdf5_file_name):
"""
Parameters
----------
hdf5_file_name : file handle or string
Returns
-------
hypergraph_adjacency : compressed sparse row matrix
"""
with tables.open_file(hdf5_file_name, 'r+') as fileh:
pars = []
for par in ('data', 'indices', 'indptr', 'shape'):
pars.append(getattr(fileh.root.consensus_group, par).read())
hypergraph_adjacency = scipy.sparse.csr_matrix(tuple(pars[:3]), shape = pars[3])
return hypergraph_adjacency | [
"def",
"load_hypergraph_adjacency",
"(",
"hdf5_file_name",
")",
":",
"with",
"tables",
".",
"open_file",
"(",
"hdf5_file_name",
",",
"'r+'",
")",
"as",
"fileh",
":",
"pars",
"=",
"[",
"]",
"for",
"par",
"in",
"(",
"'data'",
",",
"'indices'",
",",
"'indptr'",
",",
"'shape'",
")",
":",
"pars",
".",
"append",
"(",
"getattr",
"(",
"fileh",
".",
"root",
".",
"consensus_group",
",",
"par",
")",
".",
"read",
"(",
")",
")",
"hypergraph_adjacency",
"=",
"scipy",
".",
"sparse",
".",
"csr_matrix",
"(",
"tuple",
"(",
"pars",
"[",
":",
"3",
"]",
")",
",",
"shape",
"=",
"pars",
"[",
"3",
"]",
")",
"return",
"hypergraph_adjacency"
] | Parameters
----------
hdf5_file_name : file handle or string
Returns
-------
hypergraph_adjacency : compressed sparse row matrix | [
"Parameters",
"----------",
"hdf5_file_name",
":",
"file",
"handle",
"or",
"string",
"Returns",
"-------",
"hypergraph_adjacency",
":",
"compressed",
"sparse",
"row",
"matrix"
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L218-L237 |
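Continuing the storage sketch above, a round trip recovers the same matrix.

    H_loaded = load_hypergraph_adjacency(hdf5_file_name)
    assert (H_loaded != H).nnz == 0   # identical entries read back from '/consensus_group'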
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | cluster_ensembles | def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
"""Call up to three different functions for heuristic ensemble clustering
(namely CSPA, HGPA and MCLA) then select as the definitive
consensus clustering the one with the highest average mutual information score
between its vector of consensus labels and the vectors of labels associated to each
partition from the ensemble.
Parameters
----------
cluster_runs : array of shape (n_partitions, n_samples)
Each row of this matrix is such that the i-th entry corresponds to the
cluster ID to which the i-th sample of the data-set has been classified
by this particular clustering. Samples not selected for clustering
in a given round are tagged by an NaN.
hdf5_file_name : file object or string, optional (default = None)
The handle or name of an HDF5 file where any array needed
for consensus_clustering and too large to fit into memory
is to be stored. Created if not specified at input.
verbose : Boolean, optional (default = False)
Specifies if messages concerning the status of the many functions
subsequently called 'cluster_ensembles' will be displayed
on the standard output.
N_clusters_max : int, optional
The number of clusters in which to partition the samples into
a consensus clustering. This defaults to the highest number of clusters
encountered in the sets of independent clusterings on subsamples
of the data-set (i.e. the maximum of the entries in "cluster_runs").
Returns
-------
cluster_ensemble : array of shape (n_samples,)
For the final ensemble clustering, this vector contains the
cluster IDs of each sample in the whole data-set.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
"""
if hdf5_file_name is None:
hdf5_file_name = './Cluster_Ensembles.h5'
fileh = tables.open_file(hdf5_file_name, 'w')
fileh.create_group(fileh.root, 'consensus_group')
fileh.close()
cluster_ensemble = []
score = np.empty(0)
if cluster_runs.shape[1] > 10000:
consensus_functions = [HGPA, MCLA]
function_names = ['HGPA', 'MCLA']
print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
"due to a rather large number of cells in your data-set, "
"using only 'HyperGraph Partitioning Algorithm' (HGPA) "
"and 'Meta-CLustering Algorithm' (MCLA) "
"as ensemble consensus functions.\n")
else:
consensus_functions = [CSPA, HGPA, MCLA]
function_names = ['CSPA', 'HGPA', 'MCLA']
hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)
for i in range(len(consensus_functions)):
cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
"{0} at {1}.".format(function_names[i], score[i]))
print('*****')
return cluster_ensemble[np.argmax(score)] | python | def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
"""Call up to three different functions for heuristic ensemble clustering
(namely CSPA, HGPA and MCLA) then select as the definitive
consensus clustering the one with the highest average mutual information score
between its vector of consensus labels and the vectors of labels associated to each
partition from the ensemble.
Parameters
----------
cluster_runs : array of shape (n_partitions, n_samples)
Each row of this matrix is such that the i-th entry corresponds to the
cluster ID to which the i-th sample of the data-set has been classified
by this particular clustering. Samples not selected for clustering
in a given round are tagged by an NaN.
hdf5_file_name : file object or string, optional (default = None)
The handle or name of an HDF5 file where any array needed
for consensus_clustering and too large to fit into memory
is to be stored. Created if not specified at input.
verbose : Boolean, optional (default = False)
Specifies if messages concerning the status of the many functions
subsequently called 'cluster_ensembles' will be displayed
on the standard output.
N_clusters_max : int, optional
The number of clusters in which to partition the samples into
a consensus clustering. This defaults to the highest number of clusters
encountered in the sets of independent clusterings on subsamples
of the data-set (i.e. the maximum of the entries in "cluster_runs").
Returns
-------
cluster_ensemble : array of shape (n_samples,)
For the final ensemble clustering, this vector contains the
cluster IDs of each sample in the whole data-set.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
"""
if hdf5_file_name is None:
hdf5_file_name = './Cluster_Ensembles.h5'
fileh = tables.open_file(hdf5_file_name, 'w')
fileh.create_group(fileh.root, 'consensus_group')
fileh.close()
cluster_ensemble = []
score = np.empty(0)
if cluster_runs.shape[1] > 10000:
consensus_functions = [HGPA, MCLA]
function_names = ['HGPA', 'MCLA']
print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
"due to a rather large number of cells in your data-set, "
"using only 'HyperGraph Partitioning Algorithm' (HGPA) "
"and 'Meta-CLustering Algorithm' (MCLA) "
"as ensemble consensus functions.\n")
else:
consensus_functions = [CSPA, HGPA, MCLA]
function_names = ['CSPA', 'HGPA', 'MCLA']
hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)
for i in range(len(consensus_functions)):
cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
"{0} at {1}.".format(function_names[i], score[i]))
print('*****')
return cluster_ensemble[np.argmax(score)] | [
"def",
"cluster_ensembles",
"(",
"cluster_runs",
",",
"hdf5_file_name",
"=",
"None",
",",
"verbose",
"=",
"False",
",",
"N_clusters_max",
"=",
"None",
")",
":",
"if",
"hdf5_file_name",
"is",
"None",
":",
"hdf5_file_name",
"=",
"'./Cluster_Ensembles.h5'",
"fileh",
"=",
"tables",
".",
"open_file",
"(",
"hdf5_file_name",
",",
"'w'",
")",
"fileh",
".",
"create_group",
"(",
"fileh",
".",
"root",
",",
"'consensus_group'",
")",
"fileh",
".",
"close",
"(",
")",
"cluster_ensemble",
"=",
"[",
"]",
"score",
"=",
"np",
".",
"empty",
"(",
"0",
")",
"if",
"cluster_runs",
".",
"shape",
"[",
"1",
"]",
">",
"10000",
":",
"consensus_functions",
"=",
"[",
"HGPA",
",",
"MCLA",
"]",
"function_names",
"=",
"[",
"'HGPA'",
",",
"'MCLA'",
"]",
"print",
"(",
"\"\\nINFO: Cluster_Ensembles: cluster_ensembles: \"",
"\"due to a rather large number of cells in your data-set, \"",
"\"using only 'HyperGraph Partitioning Algorithm' (HGPA) \"",
"\"and 'Meta-CLustering Algorithm' (MCLA) \"",
"\"as ensemble consensus functions.\\n\"",
")",
"else",
":",
"consensus_functions",
"=",
"[",
"CSPA",
",",
"HGPA",
",",
"MCLA",
"]",
"function_names",
"=",
"[",
"'CSPA'",
",",
"'HGPA'",
",",
"'MCLA'",
"]",
"hypergraph_adjacency",
"=",
"build_hypergraph_adjacency",
"(",
"cluster_runs",
")",
"store_hypergraph_adjacency",
"(",
"hypergraph_adjacency",
",",
"hdf5_file_name",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"consensus_functions",
")",
")",
":",
"cluster_ensemble",
".",
"append",
"(",
"consensus_functions",
"[",
"i",
"]",
"(",
"hdf5_file_name",
",",
"cluster_runs",
",",
"verbose",
",",
"N_clusters_max",
")",
")",
"score",
"=",
"np",
".",
"append",
"(",
"score",
",",
"ceEvalMutual",
"(",
"cluster_runs",
",",
"cluster_ensemble",
"[",
"i",
"]",
",",
"verbose",
")",
")",
"print",
"(",
"\"\\nINFO: Cluster_Ensembles: cluster_ensembles: \"",
"\"{0} at {1}.\"",
".",
"format",
"(",
"function_names",
"[",
"i",
"]",
",",
"score",
"[",
"i",
"]",
")",
")",
"print",
"(",
"'*****'",
")",
"return",
"cluster_ensemble",
"[",
"np",
".",
"argmax",
"(",
"score",
")",
"]"
] | Call up to three different functions for heuristic ensemble clustering
(namely CSPA, HGPA and MCLA) then select as the definitive
consensus clustering the one with the highest average mutual information score
between its vector of consensus labels and the vectors of labels associated to each
partition from the ensemble.
Parameters
----------
cluster_runs : array of shape (n_partitions, n_samples)
Each row of this matrix is such that the i-th entry corresponds to the
cluster ID to which the i-th sample of the data-set has been classified
by this particular clustering. Samples not selected for clustering
in a given round are tagged by an NaN.
hdf5_file_name : file object or string, optional (default = None)
The handle or name of an HDF5 file where any array needed
for consensus_clustering and too large to fit into memory
is to be stored. Created if not specified at input.
verbose : Boolean, optional (default = False)
Specifies if messages concerning the status of the many functions
subsequently called 'cluster_ensembles' will be displayed
on the standard output.
N_clusters_max : int, optional
The number of clusters in which to partition the samples into
a consensus clustering. This defaults to the highest number of clusters
encountered in the sets of independent clusterings on subsamples
of the data-set (i.e. the maximum of the entries in "cluster_runs").
Returns
-------
cluster_ensemble : array of shape (n_samples,)
For the final ensemble clustering, this vector contains the
cluster IDs of each sample in the whole data-set.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002 | [
"Call",
"up",
"to",
"three",
"different",
"functions",
"for",
"heuristic",
"ensemble",
"clustering",
"(",
"namely",
"CSPA",
"HGPA",
"and",
"MCLA",
")",
"then",
"select",
"as",
"the",
"definitive",
"consensus",
"clustering",
"the",
"one",
"with",
"the",
"highest",
"average",
"mutual",
"information",
"score",
"between",
"its",
"vector",
"of",
"consensus",
"labels",
"and",
"the",
"vectors",
"of",
"labels",
"associated",
"to",
"each",
"partition",
"from",
"the",
"ensemble",
"."
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L240-L315 |
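An end-to-end sketch on toy data. CSPA, HGPA and MCLA are defined elsewhere in this module and rely on the package's compiled graph-partitioning backend, so this only runs with a full Cluster_Ensembles installation; the sizes and noise level are arbitrary.

    import numpy as np

    np.random.seed(0)
    truth = np.repeat([0, 1, 2], 20).astype(float)
    # Three noisy relabellings of the same 60 samples.
    cluster_runs = np.vstack([np.where(np.random.rand(60) < 0.2,
                                       np.random.randint(0, 3, 60), truth)
                              for _ in range(3)])
    consensus = cluster_ensembles(cluster_runs, verbose=False, N_clusters_max=3)
    print(consensus.shape)   # expected: (60,)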
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | ceEvalMutual | def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
"""Compute a weighted average of the mutual information with the known labels,
the weights being proportional to the fraction of known labels.
Parameters
----------
cluster_runs : array of shape (n_partitions, n_samples)
Each row of this matrix is such that the i-th entry corresponds to the
cluster ID to which the i-th sample of the data-set has been classified
by this particular clustering. Samples not selected for clustering
in a given round are tagged by an NaN.
cluster_ensemble : array of shape (n_samples,), optional (default = None)
The identity of the cluster to which each sample of the whole data-set
belong to according to consensus clustering.
verbose : Boolean, optional (default = False)
Specifies if status messages will be displayed
on the standard output.
Returns
-------
unnamed variable : float
The weighted average of the mutual information between
the consensus clustering and the many runs from the ensemble
of independent clusterings on subsamples of the data-set.
"""
if cluster_ensemble is None:
return 0.0
if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
cluster_runs = cluster_runs.reshape(1, -1)
weighted_average_mutual_information = 0
N_labelled_indices = 0
for i in range(cluster_runs.shape[0]):
labelled_indices = np.where(np.isfinite(cluster_runs[i]))[0]
N = labelled_indices.size
x = np.reshape(checkcl(cluster_ensemble[labelled_indices], verbose), newshape = N)
y = np.reshape(checkcl(np.rint(cluster_runs[i, labelled_indices]), verbose), newshape = N)
q = normalized_mutual_info_score(x, y)
weighted_average_mutual_information += q * N
N_labelled_indices += N
return float(weighted_average_mutual_information) / N_labelled_indices | python | def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
"""Compute a weighted average of the mutual information with the known labels,
the weights being proportional to the fraction of known labels.
Parameters
----------
cluster_runs : array of shape (n_partitions, n_samples)
Each row of this matrix is such that the i-th entry corresponds to the
cluster ID to which the i-th sample of the data-set has been classified
by this particular clustering. Samples not selected for clustering
in a given round are tagged by an NaN.
cluster_ensemble : array of shape (n_samples,), optional (default = None)
The identity of the cluster to which each sample of the whole data-set
belong to according to consensus clustering.
verbose : Boolean, optional (default = False)
Specifies if status messages will be displayed
on the standard output.
Returns
-------
unnamed variable : float
The weighted average of the mutual information between
the consensus clustering and the many runs from the ensemble
of independent clusterings on subsamples of the data-set.
"""
if cluster_ensemble is None:
return 0.0
if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
cluster_runs = cluster_runs.reshape(1, -1)
weighted_average_mutual_information = 0
N_labelled_indices = 0
for i in range(cluster_runs.shape[0]):
labelled_indices = np.where(np.isfinite(cluster_runs[i]))[0]
N = labelled_indices.size
x = np.reshape(checkcl(cluster_ensemble[labelled_indices], verbose), newshape = N)
y = np.reshape(checkcl(np.rint(cluster_runs[i, labelled_indices]), verbose), newshape = N)
q = normalized_mutual_info_score(x, y)
weighted_average_mutual_information += q * N
N_labelled_indices += N
return float(weighted_average_mutual_information) / N_labelled_indices | [
"def",
"ceEvalMutual",
"(",
"cluster_runs",
",",
"cluster_ensemble",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"cluster_ensemble",
"is",
"None",
":",
"return",
"0.0",
"if",
"reduce",
"(",
"operator",
".",
"mul",
",",
"cluster_runs",
".",
"shape",
",",
"1",
")",
"==",
"max",
"(",
"cluster_runs",
".",
"shape",
")",
":",
"cluster_runs",
"=",
"cluster_runs",
".",
"reshape",
"(",
"1",
",",
"-",
"1",
")",
"weighted_average_mutual_information",
"=",
"0",
"N_labelled_indices",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"cluster_runs",
".",
"shape",
"[",
"0",
"]",
")",
":",
"labelled_indices",
"=",
"np",
".",
"where",
"(",
"np",
".",
"isfinite",
"(",
"cluster_runs",
"[",
"i",
"]",
")",
")",
"[",
"0",
"]",
"N",
"=",
"labelled_indices",
".",
"size",
"x",
"=",
"np",
".",
"reshape",
"(",
"checkcl",
"(",
"cluster_ensemble",
"[",
"labelled_indices",
"]",
",",
"verbose",
")",
",",
"newshape",
"=",
"N",
")",
"y",
"=",
"np",
".",
"reshape",
"(",
"checkcl",
"(",
"np",
".",
"rint",
"(",
"cluster_runs",
"[",
"i",
",",
"labelled_indices",
"]",
")",
",",
"verbose",
")",
",",
"newshape",
"=",
"N",
")",
"q",
"=",
"normalized_mutual_info_score",
"(",
"x",
",",
"y",
")",
"weighted_average_mutual_information",
"+=",
"q",
"*",
"N",
"N_labelled_indices",
"+=",
"N",
"return",
"float",
"(",
"weighted_average_mutual_information",
")",
"/",
"N_labelled_indices"
] | Compute a weighted average of the mutual information with the known labels,
the weights being proportional to the fraction of known labels.
Parameters
----------
cluster_runs : array of shape (n_partitions, n_samples)
Each row of this matrix is such that the i-th entry corresponds to the
cluster ID to which the i-th sample of the data-set has been classified
by this particular clustering. Samples not selected for clustering
in a given round are tagged by an NaN.
cluster_ensemble : array of shape (n_samples,), optional (default = None)
The identity of the cluster to which each sample of the whole data-set
belong to according to consensus clustering.
verbose : Boolean, optional (default = False)
Specifies if status messages will be displayed
on the standard output.
Returns
-------
unnamed variable : float
The weighted average of the mutual information between
the consensus clustering and the many runs from the ensemble
of independent clusterings on subsamples of the data-set. | [
"Compute",
"a",
"weighted",
"average",
"of",
"the",
"mutual",
"information",
"with",
"the",
"known",
"labels",
"the",
"weights",
"being",
"proportional",
"to",
"the",
"fraction",
"of",
"known",
"labels",
"."
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L318-L368 |
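A small sketch of the scoring: each run contributes its normalized mutual information with the candidate consensus, weighted by how many of its samples carry a non-NaN label.

    import numpy as np

    cluster_runs = np.array([[0, 0, 1, 1, np.nan],
                             [0, 1, 1, 1, 0]])
    consensus = np.array([0, 0, 1, 1, 0])
    # Run 1 matches perfectly on its 4 labelled samples; run 2 scores lower on all 5.
    print(ceEvalMutual(cluster_runs, consensus))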
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | checkcl | def checkcl(cluster_run, verbose = False):
"""Ensure that a cluster labelling is in a valid format.
Parameters
----------
cluster_run : array of shape (n_samples,)
A vector of cluster IDs for each of the samples selected for a given
round of clustering. The samples not selected are labelled with NaN.
verbose : Boolean, optional (default = False)
Specifies if status messages will be displayed
on the standard output.
Returns
-------
cluster_run : array of shape (n_samples,)
The input vector is modified in place, such that invalid values are
either rejected or altered. In particular, the labelling of cluster IDs
starts at zero and increases by 1 without any gap left.
"""
cluster_run = np.asanyarray(cluster_run)
if cluster_run.size == 0:
raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
"empty vector provided as input.\n")
elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
raise ValueError("\nERROR: Cluster_Ensembles: checkl: "
"problem in dimensions of the cluster label vector "
"under consideration.\n")
elif np.where(np.isnan(cluster_run))[0].size != 0:
raise ValueError("\nERROR: Cluster_Ensembles: checkl: vector of cluster "
"labellings provided as input contains at least one 'NaN'.\n")
else:
min_label = np.amin(cluster_run)
if min_label < 0:
if verbose:
print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
"as cluster labellings.")
cluster_run -= min_label
if verbose:
print("\nINFO: Cluster_Ensembles: checkcl: "
"offset to a minimum value of '0'.")
x = one_to_max(cluster_run)
if np.amax(cluster_run) != np.amax(x):
if verbose:
print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
"labellings provided is not a dense integer mapping.")
cluster_run = x
if verbose:
print("INFO: Cluster_Ensembles: checkcl: brought modification "
"to this vector so that its labels range "
"from 0 to {0}, included.\n".format(np.amax(cluster_run)))
return cluster_run | python | def checkcl(cluster_run, verbose = False):
"""Ensure that a cluster labelling is in a valid format.
Parameters
----------
cluster_run : array of shape (n_samples,)
A vector of cluster IDs for each of the samples selected for a given
round of clustering. The samples not selected are labelled with NaN.
verbose : Boolean, optional (default = False)
Specifies if status messages will be displayed
on the standard output.
Returns
-------
cluster_run : array of shape (n_samples,)
The input vector is modified in place, such that invalid values are
either rejected or altered. In particular, the labelling of cluster IDs
starts at zero and increases by 1 without any gap left.
"""
cluster_run = np.asanyarray(cluster_run)
if cluster_run.size == 0:
raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
"empty vector provided as input.\n")
elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
raise ValueError("\nERROR: Cluster_Ensembles: checkl: "
"problem in dimensions of the cluster label vector "
"under consideration.\n")
elif np.where(np.isnan(cluster_run))[0].size != 0:
raise ValueError("\nERROR: Cluster_Ensembles: checkl: vector of cluster "
"labellings provided as input contains at least one 'NaN'.\n")
else:
min_label = np.amin(cluster_run)
if min_label < 0:
if verbose:
print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
"as cluster labellings.")
cluster_run -= min_label
if verbose:
print("\nINFO: Cluster_Ensembles: checkcl: "
"offset to a minimum value of '0'.")
x = one_to_max(cluster_run)
if np.amax(cluster_run) != np.amax(x):
if verbose:
print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
"labellings provided is not a dense integer mapping.")
cluster_run = x
if verbose:
print("INFO: Cluster_Ensembles: checkcl: brought modification "
"to this vector so that its labels range "
"from 0 to {0}, included.\n".format(np.amax(cluster_run)))
return cluster_run | [
"def",
"checkcl",
"(",
"cluster_run",
",",
"verbose",
"=",
"False",
")",
":",
"cluster_run",
"=",
"np",
".",
"asanyarray",
"(",
"cluster_run",
")",
"if",
"cluster_run",
".",
"size",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"\\nERROR: Cluster_Ensembles: checkcl: \"",
"\"empty vector provided as input.\\n\"",
")",
"elif",
"reduce",
"(",
"operator",
".",
"mul",
",",
"cluster_run",
".",
"shape",
",",
"1",
")",
"!=",
"max",
"(",
"cluster_run",
".",
"shape",
")",
":",
"raise",
"ValueError",
"(",
"\"\\nERROR: Cluster_Ensembles: checkl: \"",
"\"problem in dimensions of the cluster label vector \"",
"\"under consideration.\\n\"",
")",
"elif",
"np",
".",
"where",
"(",
"np",
".",
"isnan",
"(",
"cluster_run",
")",
")",
"[",
"0",
"]",
".",
"size",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"\\nERROR: Cluster_Ensembles: checkl: vector of cluster \"",
"\"labellings provided as input contains at least one 'NaN'.\\n\"",
")",
"else",
":",
"min_label",
"=",
"np",
".",
"amin",
"(",
"cluster_run",
")",
"if",
"min_label",
"<",
"0",
":",
"if",
"verbose",
":",
"print",
"(",
"\"\\nINFO: Cluster_Ensembles: checkcl: detected negative values \"",
"\"as cluster labellings.\"",
")",
"cluster_run",
"-=",
"min_label",
"if",
"verbose",
":",
"print",
"(",
"\"\\nINFO: Cluster_Ensembles: checkcl: \"",
"\"offset to a minimum value of '0'.\"",
")",
"x",
"=",
"one_to_max",
"(",
"cluster_run",
")",
"if",
"np",
".",
"amax",
"(",
"cluster_run",
")",
"!=",
"np",
".",
"amax",
"(",
"x",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"\"\\nINFO: Cluster_Ensembles: checkcl: the vector cluster \"",
"\"labellings provided is not a dense integer mapping.\"",
")",
"cluster_run",
"=",
"x",
"if",
"verbose",
":",
"print",
"(",
"\"INFO: Cluster_Ensembles: checkcl: brought modification \"",
"\"to this vector so that its labels range \"",
"\"from 0 to {0}, included.\\n\"",
".",
"format",
"(",
"np",
".",
"amax",
"(",
"cluster_run",
")",
")",
")",
"return",
"cluster_run"
] | Ensure that a cluster labelling is in a valid format.
Parameters
----------
cluster_run : array of shape (n_samples,)
A vector of cluster IDs for each of the samples selected for a given
round of clustering. The samples not selected are labelled with NaN.
verbose : Boolean, optional (default = False)
Specifies if status messages will be displayed
on the standard output.
Returns
-------
cluster_run : array of shape (n_samples,)
The input vector is modified in place, such that invalid values are
either rejected or altered. In particular, the labelling of cluster IDs
starts at zero and increases by 1 without any gap left. | [
"Ensure",
"that",
"a",
"cluster",
"labelling",
"is",
"in",
"a",
"valid",
"format",
"."
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L371-L430 |
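A quick sketch of the normalization it performs: negative and non-contiguous labels come back as a dense labelling starting at 0 (and, as the docstring notes, the input vector may be modified in place).

    import numpy as np

    labels = np.array([5, 5, -2, 9, -2])
    print(checkcl(labels, verbose=False))   # -> [1 1 0 2 0]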
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | one_to_max | def one_to_max(array_in):
"""Alter a vector of cluster labels to a dense mapping.
Given that this function is herein always called after passing
a vector to the function checkcl, one_to_max relies on the assumption
that cluster_run does not contain any NaN entries.
Parameters
----------
array_in : a list or one-dimensional array
The list of cluster IDs to be processed.
Returns
-------
result : one-dimensional array
A massaged version of the input vector of cluster identities.
"""
x = np.asanyarray(array_in)
N_in = x.size
array_in = x.reshape(N_in)
sorted_array = np.sort(array_in)
sorting_indices = np.argsort(array_in)
last = np.nan
current_index = -1
for i in range(N_in):
if last != sorted_array[i] or np.isnan(last):
last = sorted_array[i]
current_index += 1
sorted_array[i] = current_index
result = np.empty(N_in, dtype = int)
result[sorting_indices] = sorted_array
return result | python | def one_to_max(array_in):
"""Alter a vector of cluster labels to a dense mapping.
Given that this function is herein always called after passing
a vector to the function checkcl, one_to_max relies on the assumption
that cluster_run does not contain any NaN entries.
Parameters
----------
array_in : a list or one-dimensional array
The list of cluster IDs to be processed.
Returns
-------
result : one-dimensional array
A massaged version of the input vector of cluster identities.
"""
x = np.asanyarray(array_in)
N_in = x.size
array_in = x.reshape(N_in)
sorted_array = np.sort(array_in)
sorting_indices = np.argsort(array_in)
last = np.nan
current_index = -1
for i in range(N_in):
if last != sorted_array[i] or np.isnan(last):
last = sorted_array[i]
current_index += 1
sorted_array[i] = current_index
result = np.empty(N_in, dtype = int)
result[sorting_indices] = sorted_array
return result | [
"def",
"one_to_max",
"(",
"array_in",
")",
":",
"x",
"=",
"np",
".",
"asanyarray",
"(",
"array_in",
")",
"N_in",
"=",
"x",
".",
"size",
"array_in",
"=",
"x",
".",
"reshape",
"(",
"N_in",
")",
"sorted_array",
"=",
"np",
".",
"sort",
"(",
"array_in",
")",
"sorting_indices",
"=",
"np",
".",
"argsort",
"(",
"array_in",
")",
"last",
"=",
"np",
".",
"nan",
"current_index",
"=",
"-",
"1",
"for",
"i",
"in",
"range",
"(",
"N_in",
")",
":",
"if",
"last",
"!=",
"sorted_array",
"[",
"i",
"]",
"or",
"np",
".",
"isnan",
"(",
"last",
")",
":",
"last",
"=",
"sorted_array",
"[",
"i",
"]",
"current_index",
"+=",
"1",
"sorted_array",
"[",
"i",
"]",
"=",
"current_index",
"result",
"=",
"np",
".",
"empty",
"(",
"N_in",
",",
"dtype",
"=",
"int",
")",
"result",
"[",
"sorting_indices",
"]",
"=",
"sorted_array",
"return",
"result"
] | Alter a vector of cluster labels to a dense mapping.
Given that this function is herein always called after passing
a vector to the function checkcl, one_to_max relies on the assumption
that cluster_run does not contain any NaN entries.
Parameters
----------
array_in : a list or one-dimensional array
The list of cluster IDs to be processed.
Returns
-------
result : one-dimensional array
A massaged version of the input vector of cluster identities. | [
"Alter",
"a",
"vector",
"of",
"cluster",
"labels",
"to",
"a",
"dense",
"mapping",
".",
"Given",
"that",
"this",
"function",
"is",
"herein",
"always",
"called",
"after",
"passing",
"a",
"vector",
"to",
"the",
"function",
"checkcl",
"one_to_max",
"relies",
"on",
"the",
"assumption",
"that",
"cluster_run",
"does",
"not",
"contain",
"any",
"NaN",
"entries",
"."
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L433-L469 |
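A one-line sketch of the dense remapping: arbitrary label values become consecutive integers starting at 0, preserving which samples share a label.

    print(one_to_max([10, 40, 10, 99]))   # -> [0 1 0 2]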
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | checks | def checks(similarities, verbose = False):
"""Check that a matrix is a proper similarity matrix and bring
appropriate changes if applicable.
Parameters
----------
similarities : array of shape (n_samples, n_samples)
A matrix of pairwise similarities between (sub)-samples of the data-set.
verbose : Boolean, optional (default = False)
Alerts of any issue with the similarities matrix provided
and of any step possibly taken to remediate such problem.
"""
if similarities.size == 0:
raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
"matrix provided as input happens to be empty.\n")
elif np.where(np.isnan(similarities))[0].size != 0:
raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
"matrix contains at least one 'NaN'.\n")
elif np.where(np.isinf(similarities))[0].size != 0:
raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
"detected in input similarities matrix.\n")
else:
if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
if verbose:
print("\nINFO: Cluster_Ensembles: checks: complex entries found "
"in the similarities matrix.")
similarities = similarities.real
if verbose:
print("\nINFO: Cluster_Ensembles: checks: "
"truncated to their real components.")
if similarities.shape[0] != similarities.shape[1]:
if verbose:
print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
N_square = min(similarities.shape)
similarities = similarities[:N_square, :N_square]
if verbose:
print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")
max_sim = np.amax(similarities)
min_sim = np.amin(similarities)
if max_sim > 1 or min_sim < 0:
if verbose:
print("\nINFO: Cluster_Ensembles: checks: strictly negative "
"or bigger than unity entries spotted in input similarities matrix.")
indices_too_big = np.where(similarities > 1)
indices_negative = np.where(similarities < 0)
similarities[indices_too_big] = 1.0
similarities[indices_negative] = 0.0
if verbose:
print("\nINFO: Cluster_Ensembles: checks: done setting them to "
"the lower or upper accepted values.")
if not np.allclose(similarities, np.transpose(similarities)):
if verbose:
print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
"similarities matrix.")
similarities = np.divide(similarities + np.transpose(similarities), 2.0)
if verbose:
print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")
if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
if verbose:
print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
"provided as input are not all of unit value.")
similarities[np.diag_indices(similarities.shape[0])] = 1
if verbose:
print("\nINFO: Cluster_Ensembles: checks: issue corrected.") | python | def checks(similarities, verbose = False):
"""Check that a matrix is a proper similarity matrix and bring
appropriate changes if applicable.
Parameters
----------
similarities : array of shape (n_samples, n_samples)
A matrix of pairwise similarities between (sub)-samples of the data-set.
verbose : Boolean, optional (default = False)
Alerts of any issue with the similarities matrix provided
and of any step possibly taken to remediate such problem.
"""
if similarities.size == 0:
raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
"matrix provided as input happens to be empty.\n")
elif np.where(np.isnan(similarities))[0].size != 0:
raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
"matrix contains at least one 'NaN'.\n")
elif np.where(np.isinf(similarities))[0].size != 0:
raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
"detected in input similarities matrix.\n")
else:
if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
if verbose:
print("\nINFO: Cluster_Ensembles: checks: complex entries found "
"in the similarities matrix.")
similarities = similarities.real
if verbose:
print("\nINFO: Cluster_Ensembles: checks: "
"truncated to their real components.")
if similarities.shape[0] != similarities.shape[1]:
if verbose:
print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
N_square = min(similarities.shape)
similarities = similarities[:N_square, :N_square]
if verbose:
print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")
max_sim = np.amax(similarities)
min_sim = np.amin(similarities)
if max_sim > 1 or min_sim < 0:
if verbose:
print("\nINFO: Cluster_Ensembles: checks: strictly negative "
"or bigger than unity entries spotted in input similarities matrix.")
indices_too_big = np.where(similarities > 1)
indices_negative = np.where(similarities < 0)
similarities[indices_too_big] = 1.0
similarities[indices_negative] = 0.0
if verbose:
print("\nINFO: Cluster_Ensembles: checks: done setting them to "
"the lower or upper accepted values.")
if not np.allclose(similarities, np.transpose(similarities)):
if verbose:
print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
"similarities matrix.")
similarities = np.divide(similarities + np.transpose(similarities), 2.0)
if verbose:
print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")
if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
if verbose:
print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
"provided as input are not all of unit value.")
similarities[np.diag_indices(similarities.shape[0])] = 1
if verbose:
print("\nINFO: Cluster_Ensembles: checks: issue corrected.") | [
"def",
"checks",
"(",
"similarities",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"similarities",
".",
"size",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"\\nERROR: Cluster_Ensembles: checks: the similarities \"",
"\"matrix provided as input happens to be empty.\\n\"",
")",
"elif",
"np",
".",
"where",
"(",
"np",
".",
"isnan",
"(",
"similarities",
")",
")",
"[",
"0",
"]",
".",
"size",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"\\nERROR: Cluster_Ensembles: checks: input similarities \"",
"\"matrix contains at least one 'NaN'.\\n\"",
")",
"elif",
"np",
".",
"where",
"(",
"np",
".",
"isinf",
"(",
"similarities",
")",
")",
"[",
"0",
"]",
".",
"size",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"\\nERROR: Cluster_Ensembles: checks: at least one infinite entry \"",
"\"detected in input similarities matrix.\\n\"",
")",
"else",
":",
"if",
"np",
".",
"where",
"(",
"np",
".",
"logical_not",
"(",
"np",
".",
"isreal",
"(",
"similarities",
")",
")",
")",
"[",
"0",
"]",
".",
"size",
"!=",
"0",
":",
"if",
"verbose",
":",
"print",
"(",
"\"\\nINFO: Cluster_Ensembles: checks: complex entries found \"",
"\"in the similarities matrix.\"",
")",
"similarities",
"=",
"similarities",
".",
"real",
"if",
"verbose",
":",
"print",
"(",
"\"\\nINFO: Cluster_Ensembles: checks: \"",
"\"truncated to their real components.\"",
")",
"if",
"similarities",
".",
"shape",
"[",
"0",
"]",
"!=",
"similarities",
".",
"shape",
"[",
"1",
"]",
":",
"if",
"verbose",
":",
"print",
"(",
"\"\\nINFO: Cluster_Ensembles: checks: non-square matrix provided.\"",
")",
"N_square",
"=",
"min",
"(",
"similarities",
".",
"shape",
")",
"similarities",
"=",
"similarities",
"[",
":",
"N_square",
",",
":",
"N_square",
"]",
"if",
"verbose",
":",
"print",
"(",
"\"\\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.\"",
")",
"max_sim",
"=",
"np",
".",
"amax",
"(",
"similarities",
")",
"min_sim",
"=",
"np",
".",
"amin",
"(",
"similarities",
")",
"if",
"max_sim",
">",
"1",
"or",
"min_sim",
"<",
"0",
":",
"if",
"verbose",
":",
"print",
"(",
"\"\\nINFO: Cluster_Ensembles: checks: strictly negative \"",
"\"or bigger than unity entries spotted in input similarities matrix.\"",
")",
"indices_too_big",
"=",
"np",
".",
"where",
"(",
"similarities",
">",
"1",
")",
"indices_negative",
"=",
"np",
".",
"where",
"(",
"similarities",
"<",
"0",
")",
"similarities",
"[",
"indices_too_big",
"]",
"=",
"1.0",
"similarities",
"[",
"indices_negative",
"]",
"=",
"0.0",
"if",
"verbose",
":",
"print",
"(",
"\"\\nINFO: Cluster_Ensembles: checks: done setting them to \"",
"\"the lower or upper accepted values.\"",
")",
"if",
"not",
"np",
".",
"allclose",
"(",
"similarities",
",",
"np",
".",
"transpose",
"(",
"similarities",
")",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"\"\\nINFO: Cluster_Ensembles: checks: non-symmetric input \"",
"\"similarities matrix.\"",
")",
"similarities",
"=",
"np",
".",
"divide",
"(",
"similarities",
"+",
"np",
".",
"transpose",
"(",
"similarities",
")",
",",
"2.0",
")",
"if",
"verbose",
":",
"print",
"(",
"\"\\nINFO: Cluster_Ensembles: checks: now symmetrized.\"",
")",
"if",
"not",
"np",
".",
"allclose",
"(",
"np",
".",
"diag",
"(",
"similarities",
")",
",",
"np",
".",
"ones",
"(",
"similarities",
".",
"shape",
"[",
"0",
"]",
")",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"\"\\nINFO: Cluster_Ensembles: checks: the self-similarities \"",
"\"provided as input are not all of unit value.\"",
")",
"similarities",
"[",
"np",
".",
"diag_indices",
"(",
"similarities",
".",
"shape",
"[",
"0",
"]",
")",
"]",
"=",
"1",
"if",
"verbose",
":",
"print",
"(",
"\"\\nINFO: Cluster_Ensembles: checks: issue corrected.\"",
")"
] | Check that a matrix is a proper similarity matrix and bring
appropriate changes if applicable.
Parameters
----------
similarities : array of shape (n_samples, n_samples)
A matrix of pairwise similarities between (sub)-samples of the data-set.
verbose : Boolean, optional (default = False)
If True, prints an alert about any issue detected in the similarities matrix provided
and about any step taken to remediate such a problem. | [
"Check",
"that",
"a",
"matrix",
"is",
"a",
"proper",
"similarity",
"matrix",
"and",
"bring",
"appropriate",
"changes",
"if",
"applicable",
"."
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L472-L551 |
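The repairs that checks applies (clipping out-of-range entries, symmetrizing, forcing a unit diagonal) can be reproduced on a toy matrix with plain NumPy; the sketch below is illustrative only and leaves out the empty/NaN/infinite cases, for which checks raises a ValueError instead.

import numpy as np

# Toy pairwise-similarity matrix exhibiting the defects 'checks' repairs:
# an entry above 1, a slight asymmetry and an off-unit diagonal.
S = np.array([[0.9, 1.2, 0.3],
              [1.0, 1.0, 0.5],
              [0.3, 0.4, 1.0]])

S = np.clip(S, 0.0, 1.0)              # clamp entries to the accepted range [0, 1]
S = (S + S.T) / 2.0                   # symmetrize by averaging with the transpose
S[np.diag_indices_from(S)] = 1.0      # force unit self-similarities
assert np.allclose(S, S.T) and 0 <= S.min() and S.max() <= 1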
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | CSPA | def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
"""Cluster-based Similarity Partitioning Algorithm for a consensus function.
Parameters
----------
hdf5_file_name : file handle or string
cluster_runs : array of shape (n_partitions, n_samples)
verbose : bool, optional (default = False)
N_clusters_max : int, optional (default = None)
Returns
-------
A vector specifying the cluster label to which each sample has been assigned
by the CSPA heuristics for consensus clustering.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
"""
print('*****')
print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")
if N_clusters_max == None:
N_clusters_max = int(np.nanmax(cluster_runs)) + 1
N_runs = cluster_runs.shape[0]
N_samples = cluster_runs.shape[1]
if N_samples > 20000:
raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
"deal with too large a number of cells.")
hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
s = np.squeeze(np.asarray(s.todense()))
del hypergraph_adjacency
gc.collect()
checks(np.divide(s, float(N_runs)), verbose)
e_sum_before = s.sum()
sum_after = 100000000.0
scale_factor = sum_after / float(e_sum_before)
with tables.open_file(hdf5_file_name, 'r+') as fileh:
atom = tables.Float32Atom()
FILTERS = get_compression_filter(4 * (N_samples ** 2))
S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
(N_samples, N_samples), "Matrix of similarities arising "
"in Cluster-based Similarity Partitioning",
filters = FILTERS)
expr = tables.Expr("s * scale_factor")
expr.set_output(S)
expr.eval()
chunks_size = get_chunk_size(N_samples, 3)
for i in range(0, N_samples, chunks_size):
tmp = S[i:min(i+chunks_size, N_samples)]
S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)
return metis(hdf5_file_name, N_clusters_max) | python | def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
"""Cluster-based Similarity Partitioning Algorithm for a consensus function.
Parameters
----------
hdf5_file_name : file handle or string
cluster_runs : array of shape (n_partitions, n_samples)
verbose : bool, optional (default = False)
N_clusters_max : int, optional (default = None)
Returns
-------
A vector specifying the cluster label to which each sample has been assigned
by the CSPA heuristics for consensus clustering.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
"""
print('*****')
print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")
if N_clusters_max == None:
N_clusters_max = int(np.nanmax(cluster_runs)) + 1
N_runs = cluster_runs.shape[0]
N_samples = cluster_runs.shape[1]
if N_samples > 20000:
raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
"deal with too large a number of cells.")
hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
s = np.squeeze(np.asarray(s.todense()))
del hypergraph_adjacency
gc.collect()
checks(np.divide(s, float(N_runs)), verbose)
e_sum_before = s.sum()
sum_after = 100000000.0
scale_factor = sum_after / float(e_sum_before)
with tables.open_file(hdf5_file_name, 'r+') as fileh:
atom = tables.Float32Atom()
FILTERS = get_compression_filter(4 * (N_samples ** 2))
S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
(N_samples, N_samples), "Matrix of similarities arising "
"in Cluster-based Similarity Partitioning",
filters = FILTERS)
expr = tables.Expr("s * scale_factor")
expr.set_output(S)
expr.eval()
chunks_size = get_chunk_size(N_samples, 3)
for i in range(0, N_samples, chunks_size):
tmp = S[i:min(i+chunks_size, N_samples)]
S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)
return metis(hdf5_file_name, N_clusters_max) | [
"def",
"CSPA",
"(",
"hdf5_file_name",
",",
"cluster_runs",
",",
"verbose",
"=",
"False",
",",
"N_clusters_max",
"=",
"None",
")",
":",
"print",
"(",
"'*****'",
")",
"print",
"(",
"\"INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.\"",
")",
"if",
"N_clusters_max",
"==",
"None",
":",
"N_clusters_max",
"=",
"int",
"(",
"np",
".",
"nanmax",
"(",
"cluster_runs",
")",
")",
"+",
"1",
"N_runs",
"=",
"cluster_runs",
".",
"shape",
"[",
"0",
"]",
"N_samples",
"=",
"cluster_runs",
".",
"shape",
"[",
"1",
"]",
"if",
"N_samples",
">",
"20000",
":",
"raise",
"ValueError",
"(",
"\"\\nERROR: Cluster_Ensembles: CSPA: cannot efficiently \"",
"\"deal with too large a number of cells.\"",
")",
"hypergraph_adjacency",
"=",
"load_hypergraph_adjacency",
"(",
"hdf5_file_name",
")",
"s",
"=",
"scipy",
".",
"sparse",
".",
"csr_matrix",
".",
"dot",
"(",
"hypergraph_adjacency",
".",
"transpose",
"(",
")",
".",
"tocsr",
"(",
")",
",",
"hypergraph_adjacency",
")",
"s",
"=",
"np",
".",
"squeeze",
"(",
"np",
".",
"asarray",
"(",
"s",
".",
"todense",
"(",
")",
")",
")",
"del",
"hypergraph_adjacency",
"gc",
".",
"collect",
"(",
")",
"checks",
"(",
"np",
".",
"divide",
"(",
"s",
",",
"float",
"(",
"N_runs",
")",
")",
",",
"verbose",
")",
"e_sum_before",
"=",
"s",
".",
"sum",
"(",
")",
"sum_after",
"=",
"100000000.0",
"scale_factor",
"=",
"sum_after",
"/",
"float",
"(",
"e_sum_before",
")",
"with",
"tables",
".",
"open_file",
"(",
"hdf5_file_name",
",",
"'r+'",
")",
"as",
"fileh",
":",
"atom",
"=",
"tables",
".",
"Float32Atom",
"(",
")",
"FILTERS",
"=",
"get_compression_filter",
"(",
"4",
"*",
"(",
"N_samples",
"**",
"2",
")",
")",
"S",
"=",
"fileh",
".",
"create_carray",
"(",
"fileh",
".",
"root",
".",
"consensus_group",
",",
"'similarities_CSPA'",
",",
"atom",
",",
"(",
"N_samples",
",",
"N_samples",
")",
",",
"\"Matrix of similarities arising \"",
"\"in Cluster-based Similarity Partitioning\"",
",",
"filters",
"=",
"FILTERS",
")",
"expr",
"=",
"tables",
".",
"Expr",
"(",
"\"s * scale_factor\"",
")",
"expr",
".",
"set_output",
"(",
"S",
")",
"expr",
".",
"eval",
"(",
")",
"chunks_size",
"=",
"get_chunk_size",
"(",
"N_samples",
",",
"3",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"N_samples",
",",
"chunks_size",
")",
":",
"tmp",
"=",
"S",
"[",
"i",
":",
"min",
"(",
"i",
"+",
"chunks_size",
",",
"N_samples",
")",
"]",
"S",
"[",
"i",
":",
"min",
"(",
"i",
"+",
"chunks_size",
",",
"N_samples",
")",
"]",
"=",
"np",
".",
"rint",
"(",
"tmp",
")",
"return",
"metis",
"(",
"hdf5_file_name",
",",
"N_clusters_max",
")"
] | Cluster-based Similarity Partitioning Algorithm for a consensus function.
Parameters
----------
hdf5_file_name : file handle or string
cluster_runs : array of shape (n_partitions, n_samples)
verbose : bool, optional (default = False)
N_clusters_max : int, optional (default = None)
Returns
-------
A vector specifying the cluster label to which each sample has been assigned
by the CSPA heuristics for consensus clustering.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002 | [
"Cluster",
"-",
"based",
"Similarity",
"Partitioning",
"Algorithm",
"for",
"a",
"consensus",
"function",
".",
"Parameters",
"----------",
"hdf5_file_name",
":",
"file",
"handle",
"or",
"string",
"cluster_runs",
":",
"array",
"of",
"shape",
"(",
"n_partitions",
"n_samples",
")",
"verbose",
":",
"bool",
"optional",
"(",
"default",
"=",
"False",
")",
"N_clusters_max",
":",
"int",
"optional",
"(",
"default",
"=",
"None",
")",
"Returns",
"-------",
"A",
"vector",
"specifying",
"the",
"cluster",
"label",
"to",
"which",
"each",
"sample",
"has",
"been",
"assigned",
"by",
"the",
"CSPA",
"heuristics",
"for",
"consensus",
"clustering",
"."
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L554-L623 |
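The heart of CSPA is the co-association matrix s = H^T H computed above from the hypergraph adjacency H; divided by the number of partitions it gives, for every pair of samples, the fraction of runs that grouped them together. A small NumPy sketch of that computation on an arbitrary toy ensemble:

import numpy as np

# Two toy partitions of five samples (rows = partitions, columns = samples).
cluster_runs = np.array([[0, 0, 1, 1, 1],
                         [0, 0, 0, 1, 1]])
N_runs, N_samples = cluster_runs.shape

# Stack the binary membership indicators of each partition -> hypergraph adjacency H.
H = np.vstack([(run == c).astype(int)
               for run in cluster_runs
               for c in np.unique(run)])

# Co-association similarities: fraction of partitions putting samples i and j together.
S = H.T.dot(H) / float(N_runs)
print(S)   # entries lie in [0, 1]; the diagonal is exactly 1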
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | HGPA | def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
"""HyperGraph-Partitioning Algorithm for a consensus function.
Parameters
----------
hdf5_file_name : string or file handle
cluster_runs: array of shape (n_partitions, n_samples)
verbose : bool, optional (default = False)
N_clusters_max : int, optional (default = None)
Returns
-------
A vector specifying the cluster label to which each sample has been assigned
by the HGPA approximation algorithm for consensus clustering.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
"""
print('\n*****')
print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")
if N_clusters_max == None:
N_clusters_max = int(np.nanmax(cluster_runs)) + 1
return hmetis(hdf5_file_name, N_clusters_max) | python | def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
"""HyperGraph-Partitioning Algorithm for a consensus function.
Parameters
----------
hdf5_file_name : string or file handle
cluster_runs: array of shape (n_partitions, n_samples)
verbose : bool, optional (default = False)
N_clusters_max : int, optional (default = None)
Returns
-------
A vector specifying the cluster label to which each sample has been assigned
by the HGPA approximation algorithm for consensus clustering.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
"""
print('\n*****')
print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")
if N_clusters_max == None:
N_clusters_max = int(np.nanmax(cluster_runs)) + 1
return hmetis(hdf5_file_name, N_clusters_max) | [
"def",
"HGPA",
"(",
"hdf5_file_name",
",",
"cluster_runs",
",",
"verbose",
"=",
"False",
",",
"N_clusters_max",
"=",
"None",
")",
":",
"print",
"(",
"'\\n*****'",
")",
"print",
"(",
"\"INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.\"",
")",
"if",
"N_clusters_max",
"==",
"None",
":",
"N_clusters_max",
"=",
"int",
"(",
"np",
".",
"nanmax",
"(",
"cluster_runs",
")",
")",
"+",
"1",
"return",
"hmetis",
"(",
"hdf5_file_name",
",",
"N_clusters_max",
")"
] | HyperGraph-Partitioning Algorithm for a consensus function.
Parameters
----------
hdf5_file_name : string or file handle
cluster_runs: array of shape (n_partitions, n_samples)
verbose : bool, optional (default = False)
N_clusters_max : int, optional (default = None)
Returns
-------
A vector specifying the cluster label to which each sample has been assigned
by the HGPA approximation algorithm for consensus clustering.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002 | [
"HyperGraph",
"-",
"Partitioning",
"Algorithm",
"for",
"a",
"consensus",
"function",
".",
"Parameters",
"----------",
"hdf5_file_name",
":",
"string",
"or",
"file",
"handle",
"cluster_runs",
":",
"array",
"of",
"shape",
"(",
"n_partitions",
"n_samples",
")",
"verbose",
":",
"bool",
"optional",
"(",
"default",
"=",
"False",
")",
"N_clusters_max",
":",
"int",
"optional",
"(",
"default",
"=",
"None",
")",
"Returns",
"-------",
"A",
"vector",
"specifying",
"the",
"cluster",
"label",
"to",
"which",
"each",
"sample",
"has",
"been",
"assigned",
"by",
"the",
"HGPA",
"approximation",
"algorithm",
"for",
"consensus",
"clustering",
"."
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L626-L657 |
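As in CSPA and MCLA, leaving N_clusters_max at None makes HGPA infer it from the largest label occurring anywhere in the ensemble, with NaN entries (samples missing from a run) ignored; a quick sketch of that default on made-up labels:

import numpy as np

cluster_runs = np.array([[0.0, 1.0, 2.0, np.nan],
                         [0.0, 0.0, 1.0, 1.0]])
N_clusters_max = int(np.nanmax(cluster_runs)) + 1
print(N_clusters_max)   # 3, since the labels observed run from 0 to 2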
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | MCLA | def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
"""Meta-CLustering Algorithm for a consensus function.
Parameters
----------
hdf5_file_name : file handle or string
cluster_runs : array of shape (n_partitions, n_samples)
verbose : bool, optional (default = False)
N_clusters_max : int, optional (default = None)
Returns
-------
A vector specifying the cluster label to which each sample has been assigned
by the MCLA approximation algorithm for consensus clustering.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
"""
print('\n*****')
print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')
if N_clusters_max == None:
N_clusters_max = int(np.nanmax(cluster_runs)) + 1
N_runs = cluster_runs.shape[0]
N_samples = cluster_runs.shape[1]
print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")
hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
w = hypergraph_adjacency.sum(axis = 1)
N_rows = hypergraph_adjacency.shape[0]
print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
"Starting computation of Jaccard similarity matrix.")
# Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
with tables.open_file(hdf5_file_name, 'r+') as fileh:
FILTERS = get_compression_filter(4 * (N_rows ** 2))
similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
'similarities_MCLA', tables.Float32Atom(),
(N_rows, N_rows), "Matrix of pairwise Jaccard "
"similarity scores", filters = FILTERS)
scale_factor = 100.0
print("INFO: Cluster_Ensembles: MCLA: "
"starting computation of Jaccard similarity matrix.")
squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())
squared_sums = hypergraph_adjacency.sum(axis = 1)
squared_sums = np.squeeze(np.asarray(squared_sums))
chunks_size = get_chunk_size(N_rows, 7)
for i in range(0, N_rows, chunks_size):
n_dim = min(chunks_size, N_rows - i)
temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
temp = np.squeeze(np.asarray(temp))
x = squared_sums[i:min(i+chunks_size, N_rows)]
x = x.reshape(-1, 1)
x = np.dot(x, np.ones((1, squared_sums.size)))
y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))
temp = np.divide(temp, x + y - temp)
temp *= scale_factor
Jaccard_matrix = np.rint(temp)
similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix
del Jaccard_matrix, temp, x, y
gc.collect()
# Done computing the matrix of pairwise Jaccard similarity scores.
print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
"pairwise Jaccard similarity scores.")
cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
cluster_labels = one_to_max(cluster_labels)
# After 'cmetis' returns, we are done with clustering hyper-edges
# We are now ready to start the procedure meant to collapse meta-clusters.
N_consensus = np.amax(cluster_labels) + 1
fileh = tables.open_file(hdf5_file_name, 'r+')
FILTERS = get_compression_filter(4 * N_consensus * N_samples)
clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
tables.Float32Atom(), (N_consensus, N_samples),
'Matrix of mean memberships, forming meta-clusters',
filters = FILTERS)
chunks_size = get_chunk_size(N_samples, 7)
for i in range(0, N_consensus, chunks_size):
x = min(chunks_size, N_consensus - i)
matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
M = np.zeros((x, N_samples))
for j in range(x):
coord = np.where(matched_clusters[0] == j)[0]
M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
clb_cum[i:min(i+chunks_size, N_consensus)] = M
# Done with collapsing the hyper-edges into a single meta-hyper-edge,
# for each of the (N_consensus - 1) meta-clusters.
del hypergraph_adjacency
gc.collect()
# Each object will now be assigned to its most associated meta-cluster.
chunks_size = get_chunk_size(N_consensus, 4)
N_chunks, remainder = divmod(N_samples, chunks_size)
if N_chunks == 0:
null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
else:
szumsz = np.zeros(0)
for i in range(N_chunks):
M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
szumsz = np.append(szumsz, M.sum(axis = 0))
if remainder != 0:
M = clb_cum[:, N_chunks*chunks_size:N_samples]
szumsz = np.append(szumsz, M.sum(axis = 0))
null_columns = np.where(szumsz == 0)[0]
if null_columns.size != 0:
print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
"in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)
random_state = np.random.RandomState()
tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
(N_consensus, N_samples), "Temporary matrix to help with "
"collapsing to meta-hyper-edges", filters = FILTERS)
chunks_size = get_chunk_size(N_samples, 2)
N_chunks, remainder = divmod(N_consensus, chunks_size)
if N_chunks == 0:
tmp[:] = random_state.rand(N_consensus, N_samples)
else:
for i in range(N_chunks):
tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
if remainder !=0:
tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)
expr = tables.Expr("clb_cum + (tmp / 10000)")
expr.set_output(clb_cum)
expr.eval()
expr = tables.Expr("abs(tmp)")
expr.set_output(tmp)
expr.eval()
chunks_size = get_chunk_size(N_consensus, 2)
N_chunks, remainder = divmod(N_samples, chunks_size)
if N_chunks == 0:
sum_diag = tmp[:].sum(axis = 0)
else:
sum_diag = np.empty(0)
for i in range(N_chunks):
M = tmp[:, i*chunks_size:(i+1)*chunks_size]
sum_diag = np.append(sum_diag, M.sum(axis = 0))
if remainder != 0:
M = tmp[:, N_chunks*chunks_size:N_samples]
sum_diag = np.append(sum_diag, M.sum(axis = 0))
fileh.remove_node(fileh.root.consensus_group, "tmp")
# The corresponding disk space will be freed after a call to 'fileh.close()'.
inv_sum_diag = np.reciprocal(sum_diag.astype(float))
if N_chunks == 0:
clb_cum *= inv_sum_diag
max_entries = np.amax(clb_cum, axis = 0)
else:
max_entries = np.zeros(N_samples)
for i in range(N_chunks):
clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
if remainder != 0:
clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)
cluster_labels = np.zeros(N_samples, dtype = int)
winner_probabilities = np.zeros(N_samples)
chunks_size = get_chunk_size(N_samples, 2)
for i in reversed(range(0, N_consensus, chunks_size)):
ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
cluster_labels[ind[1]] = i + ind[0]
winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]
# Done with competing for objects.
cluster_labels = one_to_max(cluster_labels)
print("INFO: Cluster_Ensembles: MCLA: delivering "
"{} clusters.".format(np.unique(cluster_labels).size))
print("INFO: Cluster_Ensembles: MCLA: average posterior "
"probability is {}".format(np.mean(winner_probabilities)))
if cluster_labels.size <= 7:
print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
print(winner_probabilities)
print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
print(clb_cum)
fileh.remove_node(fileh.root.consensus_group, "clb_cum")
fileh.close()
return cluster_labels | python | def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
"""Meta-CLustering Algorithm for a consensus function.
Parameters
----------
hdf5_file_name : file handle or string
cluster_runs : array of shape (n_partitions, n_samples)
verbose : bool, optional (default = False)
N_clusters_max : int, optional (default = None)
Returns
-------
A vector specifying the cluster label to which each sample has been assigned
by the MCLA approximation algorithm for consensus clustering.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
"""
print('\n*****')
print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')
if N_clusters_max == None:
N_clusters_max = int(np.nanmax(cluster_runs)) + 1
N_runs = cluster_runs.shape[0]
N_samples = cluster_runs.shape[1]
print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")
hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
w = hypergraph_adjacency.sum(axis = 1)
N_rows = hypergraph_adjacency.shape[0]
print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
"Starting computation of Jaccard similarity matrix.")
# Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
with tables.open_file(hdf5_file_name, 'r+') as fileh:
FILTERS = get_compression_filter(4 * (N_rows ** 2))
similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
'similarities_MCLA', tables.Float32Atom(),
(N_rows, N_rows), "Matrix of pairwise Jaccard "
"similarity scores", filters = FILTERS)
scale_factor = 100.0
print("INFO: Cluster_Ensembles: MCLA: "
"starting computation of Jaccard similarity matrix.")
squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())
squared_sums = hypergraph_adjacency.sum(axis = 1)
squared_sums = np.squeeze(np.asarray(squared_sums))
chunks_size = get_chunk_size(N_rows, 7)
for i in range(0, N_rows, chunks_size):
n_dim = min(chunks_size, N_rows - i)
temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
temp = np.squeeze(np.asarray(temp))
x = squared_sums[i:min(i+chunks_size, N_rows)]
x = x.reshape(-1, 1)
x = np.dot(x, np.ones((1, squared_sums.size)))
y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))
temp = np.divide(temp, x + y - temp)
temp *= scale_factor
Jaccard_matrix = np.rint(temp)
similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix
del Jaccard_matrix, temp, x, y
gc.collect()
# Done computing the matrix of pairwise Jaccard similarity scores.
print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
"pairwise Jaccard similarity scores.")
cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
cluster_labels = one_to_max(cluster_labels)
# After 'cmetis' returns, we are done with clustering hyper-edges
# We are now ready to start the procedure meant to collapse meta-clusters.
N_consensus = np.amax(cluster_labels) + 1
fileh = tables.open_file(hdf5_file_name, 'r+')
FILTERS = get_compression_filter(4 * N_consensus * N_samples)
clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
tables.Float32Atom(), (N_consensus, N_samples),
'Matrix of mean memberships, forming meta-clusters',
filters = FILTERS)
chunks_size = get_chunk_size(N_samples, 7)
for i in range(0, N_consensus, chunks_size):
x = min(chunks_size, N_consensus - i)
matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
M = np.zeros((x, N_samples))
for j in range(x):
coord = np.where(matched_clusters[0] == j)[0]
M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
clb_cum[i:min(i+chunks_size, N_consensus)] = M
# Done with collapsing the hyper-edges into a single meta-hyper-edge,
# for each of the (N_consensus - 1) meta-clusters.
del hypergraph_adjacency
gc.collect()
# Each object will now be assigned to its most associated meta-cluster.
chunks_size = get_chunk_size(N_consensus, 4)
N_chunks, remainder = divmod(N_samples, chunks_size)
if N_chunks == 0:
null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
else:
szumsz = np.zeros(0)
for i in range(N_chunks):
M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
szumsz = np.append(szumsz, M.sum(axis = 0))
if remainder != 0:
M = clb_cum[:, N_chunks*chunks_size:N_samples]
szumsz = np.append(szumsz, M.sum(axis = 0))
null_columns = np.where(szumsz == 0)[0]
if null_columns.size != 0:
print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
"in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)
random_state = np.random.RandomState()
tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
(N_consensus, N_samples), "Temporary matrix to help with "
"collapsing to meta-hyper-edges", filters = FILTERS)
chunks_size = get_chunk_size(N_samples, 2)
N_chunks, remainder = divmod(N_consensus, chunks_size)
if N_chunks == 0:
tmp[:] = random_state.rand(N_consensus, N_samples)
else:
for i in range(N_chunks):
tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
if remainder !=0:
tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)
expr = tables.Expr("clb_cum + (tmp / 10000)")
expr.set_output(clb_cum)
expr.eval()
expr = tables.Expr("abs(tmp)")
expr.set_output(tmp)
expr.eval()
chunks_size = get_chunk_size(N_consensus, 2)
N_chunks, remainder = divmod(N_samples, chunks_size)
if N_chunks == 0:
sum_diag = tmp[:].sum(axis = 0)
else:
sum_diag = np.empty(0)
for i in range(N_chunks):
M = tmp[:, i*chunks_size:(i+1)*chunks_size]
sum_diag = np.append(sum_diag, M.sum(axis = 0))
if remainder != 0:
M = tmp[:, N_chunks*chunks_size:N_samples]
sum_diag = np.append(sum_diag, M.sum(axis = 0))
fileh.remove_node(fileh.root.consensus_group, "tmp")
# The corresponding disk space will be freed after a call to 'fileh.close()'.
inv_sum_diag = np.reciprocal(sum_diag.astype(float))
if N_chunks == 0:
clb_cum *= inv_sum_diag
max_entries = np.amax(clb_cum, axis = 0)
else:
max_entries = np.zeros(N_samples)
for i in range(N_chunks):
clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
if remainder != 0:
clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)
cluster_labels = np.zeros(N_samples, dtype = int)
winner_probabilities = np.zeros(N_samples)
chunks_size = get_chunk_size(N_samples, 2)
for i in reversed(range(0, N_consensus, chunks_size)):
ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
cluster_labels[ind[1]] = i + ind[0]
winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]
# Done with competing for objects.
cluster_labels = one_to_max(cluster_labels)
print("INFO: Cluster_Ensembles: MCLA: delivering "
"{} clusters.".format(np.unique(cluster_labels).size))
print("INFO: Cluster_Ensembles: MCLA: average posterior "
"probability is {}".format(np.mean(winner_probabilities)))
if cluster_labels.size <= 7:
print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
print(winner_probabilities)
print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
print(clb_cum)
fileh.remove_node(fileh.root.consensus_group, "clb_cum")
fileh.close()
return cluster_labels | [
"def",
"MCLA",
"(",
"hdf5_file_name",
",",
"cluster_runs",
",",
"verbose",
"=",
"False",
",",
"N_clusters_max",
"=",
"None",
")",
":",
"print",
"(",
"'\\n*****'",
")",
"print",
"(",
"'INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.'",
")",
"if",
"N_clusters_max",
"==",
"None",
":",
"N_clusters_max",
"=",
"int",
"(",
"np",
".",
"nanmax",
"(",
"cluster_runs",
")",
")",
"+",
"1",
"N_runs",
"=",
"cluster_runs",
".",
"shape",
"[",
"0",
"]",
"N_samples",
"=",
"cluster_runs",
".",
"shape",
"[",
"1",
"]",
"print",
"(",
"\"INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.\"",
")",
"hypergraph_adjacency",
"=",
"load_hypergraph_adjacency",
"(",
"hdf5_file_name",
")",
"w",
"=",
"hypergraph_adjacency",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"N_rows",
"=",
"hypergraph_adjacency",
".",
"shape",
"[",
"0",
"]",
"print",
"(",
"\"INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. \"",
"\"Starting computation of Jaccard similarity matrix.\"",
")",
"# Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.",
"with",
"tables",
".",
"open_file",
"(",
"hdf5_file_name",
",",
"'r+'",
")",
"as",
"fileh",
":",
"FILTERS",
"=",
"get_compression_filter",
"(",
"4",
"*",
"(",
"N_rows",
"**",
"2",
")",
")",
"similarities_MCLA",
"=",
"fileh",
".",
"create_carray",
"(",
"fileh",
".",
"root",
".",
"consensus_group",
",",
"'similarities_MCLA'",
",",
"tables",
".",
"Float32Atom",
"(",
")",
",",
"(",
"N_rows",
",",
"N_rows",
")",
",",
"\"Matrix of pairwise Jaccard \"",
"\"similarity scores\"",
",",
"filters",
"=",
"FILTERS",
")",
"scale_factor",
"=",
"100.0",
"print",
"(",
"\"INFO: Cluster_Ensembles: MCLA: \"",
"\"starting computation of Jaccard similarity matrix.\"",
")",
"squared_MCLA",
"=",
"hypergraph_adjacency",
".",
"dot",
"(",
"hypergraph_adjacency",
".",
"transpose",
"(",
")",
")",
"squared_sums",
"=",
"hypergraph_adjacency",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"squared_sums",
"=",
"np",
".",
"squeeze",
"(",
"np",
".",
"asarray",
"(",
"squared_sums",
")",
")",
"chunks_size",
"=",
"get_chunk_size",
"(",
"N_rows",
",",
"7",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"N_rows",
",",
"chunks_size",
")",
":",
"n_dim",
"=",
"min",
"(",
"chunks_size",
",",
"N_rows",
"-",
"i",
")",
"temp",
"=",
"squared_MCLA",
"[",
"i",
":",
"min",
"(",
"i",
"+",
"chunks_size",
",",
"N_rows",
")",
",",
":",
"]",
".",
"todense",
"(",
")",
"temp",
"=",
"np",
".",
"squeeze",
"(",
"np",
".",
"asarray",
"(",
"temp",
")",
")",
"x",
"=",
"squared_sums",
"[",
"i",
":",
"min",
"(",
"i",
"+",
"chunks_size",
",",
"N_rows",
")",
"]",
"x",
"=",
"x",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
"x",
"=",
"np",
".",
"dot",
"(",
"x",
",",
"np",
".",
"ones",
"(",
"(",
"1",
",",
"squared_sums",
".",
"size",
")",
")",
")",
"y",
"=",
"np",
".",
"dot",
"(",
"np",
".",
"ones",
"(",
"(",
"n_dim",
",",
"1",
")",
")",
",",
"squared_sums",
".",
"reshape",
"(",
"1",
",",
"-",
"1",
")",
")",
"temp",
"=",
"np",
".",
"divide",
"(",
"temp",
",",
"x",
"+",
"y",
"-",
"temp",
")",
"temp",
"*=",
"scale_factor",
"Jaccard_matrix",
"=",
"np",
".",
"rint",
"(",
"temp",
")",
"similarities_MCLA",
"[",
"i",
":",
"min",
"(",
"i",
"+",
"chunks_size",
",",
"N_rows",
")",
"]",
"=",
"Jaccard_matrix",
"del",
"Jaccard_matrix",
",",
"temp",
",",
"x",
",",
"y",
"gc",
".",
"collect",
"(",
")",
"# Done computing the matrix of pairwise Jaccard similarity scores.",
"print",
"(",
"\"INFO: Cluster_Ensembles: MCLA: done computing the matrix of \"",
"\"pairwise Jaccard similarity scores.\"",
")",
"cluster_labels",
"=",
"cmetis",
"(",
"hdf5_file_name",
",",
"N_clusters_max",
",",
"w",
")",
"cluster_labels",
"=",
"one_to_max",
"(",
"cluster_labels",
")",
"# After 'cmetis' returns, we are done with clustering hyper-edges",
"# We are now ready to start the procedure meant to collapse meta-clusters.",
"N_consensus",
"=",
"np",
".",
"amax",
"(",
"cluster_labels",
")",
"+",
"1",
"fileh",
"=",
"tables",
".",
"open_file",
"(",
"hdf5_file_name",
",",
"'r+'",
")",
"FILTERS",
"=",
"get_compression_filter",
"(",
"4",
"*",
"N_consensus",
"*",
"N_samples",
")",
"clb_cum",
"=",
"fileh",
".",
"create_carray",
"(",
"fileh",
".",
"root",
".",
"consensus_group",
",",
"'clb_cum'",
",",
"tables",
".",
"Float32Atom",
"(",
")",
",",
"(",
"N_consensus",
",",
"N_samples",
")",
",",
"'Matrix of mean memberships, forming meta-clusters'",
",",
"filters",
"=",
"FILTERS",
")",
"chunks_size",
"=",
"get_chunk_size",
"(",
"N_samples",
",",
"7",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"N_consensus",
",",
"chunks_size",
")",
":",
"x",
"=",
"min",
"(",
"chunks_size",
",",
"N_consensus",
"-",
"i",
")",
"matched_clusters",
"=",
"np",
".",
"where",
"(",
"cluster_labels",
"==",
"np",
".",
"reshape",
"(",
"np",
".",
"arange",
"(",
"i",
",",
"min",
"(",
"i",
"+",
"chunks_size",
",",
"N_consensus",
")",
")",
",",
"newshape",
"=",
"(",
"x",
",",
"1",
")",
")",
")",
"M",
"=",
"np",
".",
"zeros",
"(",
"(",
"x",
",",
"N_samples",
")",
")",
"for",
"j",
"in",
"range",
"(",
"x",
")",
":",
"coord",
"=",
"np",
".",
"where",
"(",
"matched_clusters",
"[",
"0",
"]",
"==",
"j",
")",
"[",
"0",
"]",
"M",
"[",
"j",
"]",
"=",
"np",
".",
"asarray",
"(",
"hypergraph_adjacency",
"[",
"matched_clusters",
"[",
"1",
"]",
"[",
"coord",
"]",
",",
":",
"]",
".",
"mean",
"(",
"axis",
"=",
"0",
")",
")",
"clb_cum",
"[",
"i",
":",
"min",
"(",
"i",
"+",
"chunks_size",
",",
"N_consensus",
")",
"]",
"=",
"M",
"# Done with collapsing the hyper-edges into a single meta-hyper-edge, ",
"# for each of the (N_consensus - 1) meta-clusters.",
"del",
"hypergraph_adjacency",
"gc",
".",
"collect",
"(",
")",
"# Each object will now be assigned to its most associated meta-cluster.",
"chunks_size",
"=",
"get_chunk_size",
"(",
"N_consensus",
",",
"4",
")",
"N_chunks",
",",
"remainder",
"=",
"divmod",
"(",
"N_samples",
",",
"chunks_size",
")",
"if",
"N_chunks",
"==",
"0",
":",
"null_columns",
"=",
"np",
".",
"where",
"(",
"clb_cum",
"[",
":",
"]",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"==",
"0",
")",
"[",
"0",
"]",
"else",
":",
"szumsz",
"=",
"np",
".",
"zeros",
"(",
"0",
")",
"for",
"i",
"in",
"range",
"(",
"N_chunks",
")",
":",
"M",
"=",
"clb_cum",
"[",
":",
",",
"i",
"*",
"chunks_size",
":",
"(",
"i",
"+",
"1",
")",
"*",
"chunks_size",
"]",
"szumsz",
"=",
"np",
".",
"append",
"(",
"szumsz",
",",
"M",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
")",
"if",
"remainder",
"!=",
"0",
":",
"M",
"=",
"clb_cum",
"[",
":",
",",
"N_chunks",
"*",
"chunks_size",
":",
"N_samples",
"]",
"szumsz",
"=",
"np",
".",
"append",
"(",
"szumsz",
",",
"M",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
")",
"null_columns",
"=",
"np",
".",
"where",
"(",
"szumsz",
"==",
"0",
")",
"[",
"0",
"]",
"if",
"null_columns",
".",
"size",
"!=",
"0",
":",
"print",
"(",
"\"INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations \"",
"\"in 'clb_cum' matrix of meta-clusters.\"",
".",
"format",
"(",
"null_columns",
".",
"size",
")",
")",
"clb_cum",
"[",
":",
",",
"null_columns",
"]",
"=",
"np",
".",
"random",
".",
"rand",
"(",
"N_consensus",
",",
"null_columns",
".",
"size",
")",
"random_state",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
")",
"tmp",
"=",
"fileh",
".",
"create_carray",
"(",
"fileh",
".",
"root",
".",
"consensus_group",
",",
"'tmp'",
",",
"tables",
".",
"Float32Atom",
"(",
")",
",",
"(",
"N_consensus",
",",
"N_samples",
")",
",",
"\"Temporary matrix to help with \"",
"\"collapsing to meta-hyper-edges\"",
",",
"filters",
"=",
"FILTERS",
")",
"chunks_size",
"=",
"get_chunk_size",
"(",
"N_samples",
",",
"2",
")",
"N_chunks",
",",
"remainder",
"=",
"divmod",
"(",
"N_consensus",
",",
"chunks_size",
")",
"if",
"N_chunks",
"==",
"0",
":",
"tmp",
"[",
":",
"]",
"=",
"random_state",
".",
"rand",
"(",
"N_consensus",
",",
"N_samples",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"N_chunks",
")",
":",
"tmp",
"[",
"i",
"*",
"chunks_size",
":",
"(",
"i",
"+",
"1",
")",
"*",
"chunks_size",
"]",
"=",
"random_state",
".",
"rand",
"(",
"chunks_size",
",",
"N_samples",
")",
"if",
"remainder",
"!=",
"0",
":",
"tmp",
"[",
"N_chunks",
"*",
"chunks_size",
":",
"N_consensus",
"]",
"=",
"random_state",
".",
"rand",
"(",
"remainder",
",",
"N_samples",
")",
"expr",
"=",
"tables",
".",
"Expr",
"(",
"\"clb_cum + (tmp / 10000)\"",
")",
"expr",
".",
"set_output",
"(",
"clb_cum",
")",
"expr",
".",
"eval",
"(",
")",
"expr",
"=",
"tables",
".",
"Expr",
"(",
"\"abs(tmp)\"",
")",
"expr",
".",
"set_output",
"(",
"tmp",
")",
"expr",
".",
"eval",
"(",
")",
"chunks_size",
"=",
"get_chunk_size",
"(",
"N_consensus",
",",
"2",
")",
"N_chunks",
",",
"remainder",
"=",
"divmod",
"(",
"N_samples",
",",
"chunks_size",
")",
"if",
"N_chunks",
"==",
"0",
":",
"sum_diag",
"=",
"tmp",
"[",
":",
"]",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"else",
":",
"sum_diag",
"=",
"np",
".",
"empty",
"(",
"0",
")",
"for",
"i",
"in",
"range",
"(",
"N_chunks",
")",
":",
"M",
"=",
"tmp",
"[",
":",
",",
"i",
"*",
"chunks_size",
":",
"(",
"i",
"+",
"1",
")",
"*",
"chunks_size",
"]",
"sum_diag",
"=",
"np",
".",
"append",
"(",
"sum_diag",
",",
"M",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
")",
"if",
"remainder",
"!=",
"0",
":",
"M",
"=",
"tmp",
"[",
":",
",",
"N_chunks",
"*",
"chunks_size",
":",
"N_samples",
"]",
"sum_diag",
"=",
"np",
".",
"append",
"(",
"sum_diag",
",",
"M",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
")",
"fileh",
".",
"remove_node",
"(",
"fileh",
".",
"root",
".",
"consensus_group",
",",
"\"tmp\"",
")",
"# The corresponding disk space will be freed after a call to 'fileh.close()'.",
"inv_sum_diag",
"=",
"np",
".",
"reciprocal",
"(",
"sum_diag",
".",
"astype",
"(",
"float",
")",
")",
"if",
"N_chunks",
"==",
"0",
":",
"clb_cum",
"*=",
"inv_sum_diag",
"max_entries",
"=",
"np",
".",
"amax",
"(",
"clb_cum",
",",
"axis",
"=",
"0",
")",
"else",
":",
"max_entries",
"=",
"np",
".",
"zeros",
"(",
"N_samples",
")",
"for",
"i",
"in",
"range",
"(",
"N_chunks",
")",
":",
"clb_cum",
"[",
":",
",",
"i",
"*",
"chunks_size",
":",
"(",
"i",
"+",
"1",
")",
"*",
"chunks_size",
"]",
"*=",
"inv_sum_diag",
"[",
"i",
"*",
"chunks_size",
":",
"(",
"i",
"+",
"1",
")",
"*",
"chunks_size",
"]",
"max_entries",
"[",
"i",
"*",
"chunks_size",
":",
"(",
"i",
"+",
"1",
")",
"*",
"chunks_size",
"]",
"=",
"np",
".",
"amax",
"(",
"clb_cum",
"[",
":",
",",
"i",
"*",
"chunks_size",
":",
"(",
"i",
"+",
"1",
")",
"*",
"chunks_size",
"]",
",",
"axis",
"=",
"0",
")",
"if",
"remainder",
"!=",
"0",
":",
"clb_cum",
"[",
":",
",",
"N_chunks",
"*",
"chunks_size",
":",
"N_samples",
"]",
"*=",
"inv_sum_diag",
"[",
"N_chunks",
"*",
"chunks_size",
":",
"N_samples",
"]",
"max_entries",
"[",
"N_chunks",
"*",
"chunks_size",
":",
"N_samples",
"]",
"=",
"np",
".",
"amax",
"(",
"clb_cum",
"[",
":",
",",
"N_chunks",
"*",
"chunks_size",
":",
"N_samples",
"]",
",",
"axis",
"=",
"0",
")",
"cluster_labels",
"=",
"np",
".",
"zeros",
"(",
"N_samples",
",",
"dtype",
"=",
"int",
")",
"winner_probabilities",
"=",
"np",
".",
"zeros",
"(",
"N_samples",
")",
"chunks_size",
"=",
"get_chunk_size",
"(",
"N_samples",
",",
"2",
")",
"for",
"i",
"in",
"reversed",
"(",
"range",
"(",
"0",
",",
"N_consensus",
",",
"chunks_size",
")",
")",
":",
"ind",
"=",
"np",
".",
"where",
"(",
"np",
".",
"tile",
"(",
"max_entries",
",",
"(",
"min",
"(",
"chunks_size",
",",
"N_consensus",
"-",
"i",
")",
",",
"1",
")",
")",
"==",
"clb_cum",
"[",
"i",
":",
"min",
"(",
"i",
"+",
"chunks_size",
",",
"N_consensus",
")",
"]",
")",
"cluster_labels",
"[",
"ind",
"[",
"1",
"]",
"]",
"=",
"i",
"+",
"ind",
"[",
"0",
"]",
"winner_probabilities",
"[",
"ind",
"[",
"1",
"]",
"]",
"=",
"clb_cum",
"[",
"(",
"ind",
"[",
"0",
"]",
"+",
"i",
",",
"ind",
"[",
"1",
"]",
")",
"]",
"# Done with competing for objects.",
"cluster_labels",
"=",
"one_to_max",
"(",
"cluster_labels",
")",
"print",
"(",
"\"INFO: Cluster_Ensembles: MCLA: delivering \"",
"\"{} clusters.\"",
".",
"format",
"(",
"np",
".",
"unique",
"(",
"cluster_labels",
")",
".",
"size",
")",
")",
"print",
"(",
"\"INFO: Cluster_Ensembles: MCLA: average posterior \"",
"\"probability is {}\"",
".",
"format",
"(",
"np",
".",
"mean",
"(",
"winner_probabilities",
")",
")",
")",
"if",
"cluster_labels",
".",
"size",
"<=",
"7",
":",
"print",
"(",
"\"INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:\"",
")",
"print",
"(",
"winner_probabilities",
")",
"print",
"(",
"\"'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:\"",
")",
"print",
"(",
"clb_cum",
")",
"fileh",
".",
"remove_node",
"(",
"fileh",
".",
"root",
".",
"consensus_group",
",",
"\"clb_cum\"",
")",
"fileh",
".",
"close",
"(",
")",
"return",
"cluster_labels"
] | Meta-CLustering Algorithm for a consensus function.
Parameters
----------
hdf5_file_name : file handle or string
cluster_runs : array of shape (n_partitions, n_samples)
verbose : bool, optional (default = False)
N_clusters_max : int, optional (default = None)
Returns
-------
A vector specifying the cluster label to which each sample has been assigned
by the MCLA approximation algorithm for consensus clustering.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002 | [
"Meta",
"-",
"CLustering",
"Algorithm",
"for",
"a",
"consensus",
"function",
".",
"Parameters",
"----------",
"hdf5_file_name",
":",
"file",
"handle",
"or",
"string",
"cluster_runs",
":",
"array",
"of",
"shape",
"(",
"n_partitions",
"n_samples",
")",
"verbose",
":",
"bool",
"optional",
"(",
"default",
"=",
"False",
")",
"N_clusters_max",
":",
"int",
"optional",
"(",
"default",
"=",
"None",
")",
"Returns",
"-------",
"A",
"vector",
"specifying",
"the",
"cluster",
"label",
"to",
"which",
"each",
"sample",
"has",
"been",
"assigned",
"by",
"the",
"MCLA",
"approximation",
"algorithm",
"for",
"consensus",
"clustering",
"."
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L660-L881 |
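The meta-clustering step of MCLA scores every pair of hyper-edges by their Jaccard similarity, |a & b| / (|a| + |b| - |a & b|), which the chunked loop above evaluates as temp / (x + y - temp) before scaling by 100 and rounding. A toy NumPy version of the same computation (values are arbitrary):

import numpy as np

# Three binary hyper-edges (rows) over five samples (columns).
H = np.array([[1, 1, 0, 0, 0],
              [1, 1, 1, 0, 0],
              [0, 0, 0, 1, 1]], dtype=float)

intersections = H.dot(H.T)                     # |a & b| for every pair of hyper-edges
sizes = H.sum(axis=1)
unions = sizes[:, None] + sizes[None, :] - intersections
jaccard = intersections / unions
print(np.rint(100.0 * jaccard))                # scaled and rounded as in the code above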
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | create_membership_matrix | def create_membership_matrix(cluster_run):
"""For a label vector represented by cluster_run, constructs the binary
membership indicator matrix. Such matrices, when concatenated, contribute
to the adjacency matrix for a hypergraph representation of an
ensemble of clusterings.
Parameters
----------
cluster_run : array of shape (n_partitions, n_samples)
Returns
-------
An adjacency matrix in compressed sparse row form.
"""
cluster_run = np.asanyarray(cluster_run)
if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
"problem in dimensions of the cluster label vector "
"under consideration.")
else:
cluster_run = cluster_run.reshape(cluster_run.size)
cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))
indices = np.empty(0, dtype = np.int32)
indptr = np.zeros(1, dtype = np.int32)
for elt in cluster_ids:
indices = np.append(indices, np.where(cluster_run == elt)[0])
indptr = np.append(indptr, indices.size)
data = np.ones(indices.size, dtype = int)
return scipy.sparse.csr_matrix((data, indices, indptr), shape = (cluster_ids.size, cluster_run.size)) | python | def create_membership_matrix(cluster_run):
"""For a label vector represented by cluster_run, constructs the binary
membership indicator matrix. Such matrices, when concatenated, contribute
to the adjacency matrix for a hypergraph representation of an
ensemble of clusterings.
Parameters
----------
cluster_run : array of shape (n_partitions, n_samples)
Returns
-------
An adjacency matrix in compressed sparse row form.
"""
cluster_run = np.asanyarray(cluster_run)
if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
"problem in dimensions of the cluster label vector "
"under consideration.")
else:
cluster_run = cluster_run.reshape(cluster_run.size)
cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))
indices = np.empty(0, dtype = np.int32)
indptr = np.zeros(1, dtype = np.int32)
for elt in cluster_ids:
indices = np.append(indices, np.where(cluster_run == elt)[0])
indptr = np.append(indptr, indices.size)
data = np.ones(indices.size, dtype = int)
return scipy.sparse.csr_matrix((data, indices, indptr), shape = (cluster_ids.size, cluster_run.size)) | [
"def",
"create_membership_matrix",
"(",
"cluster_run",
")",
":",
"cluster_run",
"=",
"np",
".",
"asanyarray",
"(",
"cluster_run",
")",
"if",
"reduce",
"(",
"operator",
".",
"mul",
",",
"cluster_run",
".",
"shape",
",",
"1",
")",
"!=",
"max",
"(",
"cluster_run",
".",
"shape",
")",
":",
"raise",
"ValueError",
"(",
"\"\\nERROR: Cluster_Ensembles: create_membership_matrix: \"",
"\"problem in dimensions of the cluster label vector \"",
"\"under consideration.\"",
")",
"else",
":",
"cluster_run",
"=",
"cluster_run",
".",
"reshape",
"(",
"cluster_run",
".",
"size",
")",
"cluster_ids",
"=",
"np",
".",
"unique",
"(",
"np",
".",
"compress",
"(",
"np",
".",
"isfinite",
"(",
"cluster_run",
")",
",",
"cluster_run",
")",
")",
"indices",
"=",
"np",
".",
"empty",
"(",
"0",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"indptr",
"=",
"np",
".",
"zeros",
"(",
"1",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"for",
"elt",
"in",
"cluster_ids",
":",
"indices",
"=",
"np",
".",
"append",
"(",
"indices",
",",
"np",
".",
"where",
"(",
"cluster_run",
"==",
"elt",
")",
"[",
"0",
"]",
")",
"indptr",
"=",
"np",
".",
"append",
"(",
"indptr",
",",
"indices",
".",
"size",
")",
"data",
"=",
"np",
".",
"ones",
"(",
"indices",
".",
"size",
",",
"dtype",
"=",
"int",
")",
"return",
"scipy",
".",
"sparse",
".",
"csr_matrix",
"(",
"(",
"data",
",",
"indices",
",",
"indptr",
")",
",",
"shape",
"=",
"(",
"cluster_ids",
".",
"size",
",",
"cluster_run",
".",
"size",
")",
")"
] | For a label vector represented by cluster_run, constructs the binary
membership indicator matrix. Such matrices, when concatenated, contribute
to the adjacency matrix for a hypergraph representation of an
ensemble of clusterings.
Parameters
----------
cluster_run : array of shape (n_partitions, n_samples)
Returns
-------
An adjacency matrix in compressed sparse row form. | [
"For",
"a",
"label",
"vector",
"represented",
"by",
"cluster_run",
"constructs",
"the",
"binary",
"membership",
"indicator",
"matrix",
".",
"Such",
"matrices",
"when",
"concatenated",
"contribute",
"to",
"the",
"adjacency",
"matrix",
"for",
"a",
"hypergraph",
"representation",
"of",
"an",
"ensemble",
"of",
"clusterings",
".",
"Parameters",
"----------",
"cluster_run",
":",
"array",
"of",
"shape",
"(",
"n_partitions",
"n_samples",
")",
"Returns",
"-------",
"An",
"adjacnecy",
"matrix",
"in",
"compressed",
"sparse",
"row",
"form",
"."
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L884-L919 |
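A worked toy call clarifies the output's shape: one row per distinct finite label, one column per sample, with ones marking membership. The dense NumPy equivalent below (illustrative labels) produces the same contents that create_membership_matrix stores in CSR form:

import numpy as np

cluster_run = np.array([1, 1, 2, 0, 2])
cluster_ids = np.unique(cluster_run)                                    # [0, 1, 2]
membership = (cluster_run[None, :] == cluster_ids[:, None]).astype(int)
print(membership)
# [[0 0 0 1 0]    row of cluster 0
#  [1 1 0 0 0]    row of cluster 1
#  [0 0 1 0 1]]   row of cluster 2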
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | metis | def metis(hdf5_file_name, N_clusters_max):
"""METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph
passed by CSPA.
Parameters
----------
hdf5_file_name : string or file handle
N_clusters_max : int
Returns
-------
labels : array of shape (n_samples,)
A vector of labels denoting the cluster to which each sample has been assigned
as a result of the CSPA heuristics for consensus clustering.
Reference
---------
G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
"""
file_name = wgraph(hdf5_file_name)
labels = sgraph(N_clusters_max, file_name)
subprocess.call(['rm', file_name])
return labels | python | def metis(hdf5_file_name, N_clusters_max):
"""METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph
passed by CSPA.
Parameters
----------
hdf5_file_name : string or file handle
N_clusters_max : int
Returns
-------
labels : array of shape (n_samples,)
A vector of labels denoting the cluster to which each sample has been assigned
as a result of the CSPA heuristics for consensus clustering.
Reference
---------
G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
"""
file_name = wgraph(hdf5_file_name)
labels = sgraph(N_clusters_max, file_name)
subprocess.call(['rm', file_name])
return labels | [
"def",
"metis",
"(",
"hdf5_file_name",
",",
"N_clusters_max",
")",
":",
"file_name",
"=",
"wgraph",
"(",
"hdf5_file_name",
")",
"labels",
"=",
"sgraph",
"(",
"N_clusters_max",
",",
"file_name",
")",
"subprocess",
".",
"call",
"(",
"[",
"'rm'",
",",
"file_name",
"]",
")",
"return",
"labels"
] | METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph
passed by CSPA.
Parameters
----------
hdf5_file_name : string or file handle
N_clusters_max : int
Returns
-------
labels : array of shape (n_samples,)
A vector of labels denoting the cluster to which each sample has been assigned
as a result of the CSPA heuristics for consensus clustering.
Reference
---------
G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999. | [
"METIS",
"algorithm",
"by",
"Karypis",
"and",
"Kumar",
".",
"Partitions",
"the",
"induced",
"similarity",
"graph",
"passed",
"by",
"CSPA",
"."
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L922-L949 |
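metis therefore only chains two helpers: wgraph serializes the CSPA similarity matrix into a METIS-format text file whose header line is '<n_vertices> <n_edges> 1' (the trailing 1 flags weighted edges, as the wgraph source further below shows), and sgraph runs the partitioner on that file before the temporary file is removed. A hedged sketch of the adjacency-list text such a file contains, built here for a toy integer-weighted matrix rather than from the HDF5 store:

import numpy as np

S = np.array([[0, 80,  0],
              [80, 0, 40],
              [0, 40,  0]])                         # integer edge weights, zero diagonal

n_vertices = S.shape[0]
n_edges = int(np.sum(S > 0) // 2)                   # each undirected edge counted once
lines = ['{} {} 1'.format(n_vertices, n_edges)]     # header: vertices, edges, '1' = edge weights
for i in range(n_vertices):
    entries = []
    for j in np.flatnonzero(S[i]):
        entries.extend([str(j + 1), str(S[i, j])])  # METIS vertex ids are 1-based
    lines.append(' '.join(entries))
print('\n'.join(lines))    # "3 2 1", then one neighbour list per vertex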
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | hmetis | def hmetis(hdf5_file_name, N_clusters_max, w = None):
"""Gives cluster labels ranging from 1 to N_clusters_max for
hypergraph partitioning required for HGPA.
Parameters
----------
hdf5_file_name : file handle or string
N_clusters_max : int
w : array, optional (default = None)
Returns
-------
labels : array of shape (n_samples,)
A vector of labels denoting the cluster to which each sample has been assigned
as a result of the HGPA approximation algorithm for consensus clustering.
Reference
---------
G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
partitioning: applications in VLSI domain"
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
Vol. 7, No. 1, pp. 69-79, 1999.
"""
if w is None:
file_name = wgraph(hdf5_file_name, None, 2)
else:
file_name = wgraph(hdf5_file_name, w, 3)
labels = sgraph(N_clusters_max, file_name)
labels = one_to_max(labels)
subprocess.call(['rm', file_name])
return labels | python | def hmetis(hdf5_file_name, N_clusters_max, w = None):
"""Gives cluster labels ranging from 1 to N_clusters_max for
hypergraph partitioning required for HGPA.
Parameters
----------
hdf5_file_name : file handle or string
N_clusters_max : int
w : array, optional (default = None)
Returns
-------
labels : array of shape (n_samples,)
A vector of labels denoting the cluster to which each sample has been assigned
as a result of the HGPA approximation algorithm for consensus clustering.
Reference
---------
G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
partitioning: applications in VLSI domain"
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
Vol. 7, No. 1, pp. 69-79, 1999.
"""
if w is None:
file_name = wgraph(hdf5_file_name, None, 2)
else:
file_name = wgraph(hdf5_file_name, w, 3)
labels = sgraph(N_clusters_max, file_name)
labels = one_to_max(labels)
subprocess.call(['rm', file_name])
return labels | [
"def",
"hmetis",
"(",
"hdf5_file_name",
",",
"N_clusters_max",
",",
"w",
"=",
"None",
")",
":",
"if",
"w",
"is",
"None",
":",
"file_name",
"=",
"wgraph",
"(",
"hdf5_file_name",
",",
"None",
",",
"2",
")",
"else",
":",
"file_name",
"=",
"wgraph",
"(",
"hdf5_file_name",
",",
"w",
",",
"3",
")",
"labels",
"=",
"sgraph",
"(",
"N_clusters_max",
",",
"file_name",
")",
"labels",
"=",
"one_to_max",
"(",
"labels",
")",
"subprocess",
".",
"call",
"(",
"[",
"'rm'",
",",
"file_name",
"]",
")",
"return",
"labels"
] | Gives cluster labels ranging from 1 to N_clusters_max for
hypergraph partitioning required for HGPA.
Parameters
----------
hdf5_file_name : file handle or string
N_clusters_max : int
w : array, optional (default = None)
Returns
-------
labels : array of shape (n_samples,)
A vector of labels denoting the cluster to which each sample has been assigned
as a result of the HGPA approximation algorithm for consensus clustering.
Reference
---------
G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
partitioning: applications in VLSI domain"
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
Vol. 7, No. 1, pp. 69-79, 1999. | [
"Gives",
"cluster",
"labels",
"ranging",
"from",
"1",
"to",
"N_clusters_max",
"for",
"hypergraph",
"partitioning",
"required",
"for",
"HGPA",
"."
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L952-L987 |
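hmetis() differs from metis() only in that it partitions the stored hypergraph adjacency (HGPA) and optionally weights each hyperedge. A sketch under the same assumptions as the metis example above, with an illustrative uniform weight vector:

# Hypothetical HGPA driver: partition the stored hypergraph, first with
# unweighted hyperedges (w=None, method code 2 in wgraph) and then with an
# explicit weight per hyperedge (method code 3 in wgraph).
import numpy as np
from Cluster_Ensembles.Cluster_Ensembles import hmetis  # import path assumed from this row

hdf5_store = 'Cluster_Ensembles_store.h5'   # placeholder; must hold the hypergraph adjacency
labels_unweighted = hmetis(hdf5_store, N_clusters_max=4)
weights = np.ones(500)                      # placeholder: one weight per hyperedge (total clusters across all runs)
labels_weighted = hmetis(hdf5_store, N_clusters_max=4, w=weights)
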
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | wgraph | def wgraph(hdf5_file_name, w = None, method = 0):
"""Write a graph file in a format apposite to later use by METIS or HMETIS.
Parameters
----------
hdf5_file_name : file handle or string
w : list or array, optional (default = None)
method : int, optional (default = 0)
Returns
-------
file_name : string
"""
print('\n#')
if method == 0:
fileh = tables.open_file(hdf5_file_name, 'r+')
e_mat = fileh.root.consensus_group.similarities_CSPA
file_name = 'wgraph_CSPA'
elif method == 1:
fileh = tables.open_file(hdf5_file_name, 'r+')
e_mat = fileh.root.consensus_group.similarities_MCLA
file_name = 'wgraph_MCLA'
elif method in {2, 3}:
hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
e_mat = hypergraph_adjacency.copy().transpose()
file_name = 'wgraph_HGPA'
fileh = tables.open_file(hdf5_file_name, 'r+')
else:
raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
"invalid code for choice of method; "
"choose either 0, 1, 2 or 3.")
if w is None:
w = []
N_rows = e_mat.shape[0]
N_cols = e_mat.shape[1]
if method in {0, 1}:
diag_ind = np.diag_indices(N_rows)
e_mat[diag_ind] = 0
if method == 1:
scale_factor = 100.0
w_sum_before = np.sum(w)
w *= scale_factor
w = np.rint(w)
with open(file_name, 'w') as file:
print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))
if method == 0:
sz = float(np.sum(e_mat[:] > 0)) / 2
if int(sz) == 0:
return 'DO_NOT_PROCESS'
else:
file.write('{} {} 1\n'.format(N_rows, int(sz)))
elif method == 1:
chunks_size = get_chunk_size(N_cols, 2)
N_chunks, remainder = divmod(N_rows, chunks_size)
if N_chunks == 0:
sz = float(np.sum(e_mat[:] > 0)) / 2
else:
sz = 0
for i in range(N_chunks):
M = e_mat[i*chunks_size:(i+1)*chunks_size]
sz += float(np.sum(M > 0))
if remainder != 0:
M = e_mat[N_chunks*chunks_size:N_rows]
sz += float(np.sum(M > 0))
sz = float(sz) / 2
file.write('{} {} 11\n'.format(N_rows, int(sz)))
else:
file.write('{} {} 1\n'.format(N_cols, N_rows))
if method in {0, 1}:
chunks_size = get_chunk_size(N_cols, 2)
for i in range(0, N_rows, chunks_size):
M = e_mat[i:min(i+chunks_size, N_rows)]
for j in range(M.shape[0]):
edges = np.where(M[j] > 0)[0]
weights = M[j, edges]
if method == 0:
interlaced = np.zeros(2 * edges.size, dtype = int)
# METIS and hMETIS have vertices numbering starting from 1:
interlaced[::2] = edges + 1
interlaced[1::2] = weights
else:
interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
interlaced[0] = w[i + j]
# METIS and hMETIS have vertices numbering starting from 1:
interlaced[1::2] = edges + 1
interlaced[2::2] = weights
for elt in interlaced:
file.write('{} '.format(int(elt)))
file.write('\n')
else:
print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
"non-zero hyper-edges.".format(**locals()))
chunks_size = get_chunk_size(N_rows, 2)
for i in range(0, N_cols, chunks_size):
M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
for j in range(M.shape[1]):
edges = np.where(M[:, j] > 0)[0]
if method == 2:
weight = np.array(M[:, j].sum(), dtype = int)
else:
weight = w[i + j]
# METIS and hMETIS require vertices numbering starting from 1:
interlaced = np.append(weight, edges + 1)
for elt in interlaced:
file.write('{} '.format(int(elt)))
file.write('\n')
if method in {0, 1}:
fileh.remove_node(fileh.root.consensus_group, e_mat.name)
fileh.close()
print('#')
return file_name | python | def wgraph(hdf5_file_name, w = None, method = 0):
"""Write a graph file in a format apposite to later use by METIS or HMETIS.
Parameters
----------
hdf5_file_name : file handle or string
w : list or array, optional (default = None)
method : int, optional (default = 0)
Returns
-------
file_name : string
"""
print('\n#')
if method == 0:
fileh = tables.open_file(hdf5_file_name, 'r+')
e_mat = fileh.root.consensus_group.similarities_CSPA
file_name = 'wgraph_CSPA'
elif method == 1:
fileh = tables.open_file(hdf5_file_name, 'r+')
e_mat = fileh.root.consensus_group.similarities_MCLA
file_name = 'wgraph_MCLA'
elif method in {2, 3}:
hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
e_mat = hypergraph_adjacency.copy().transpose()
file_name = 'wgraph_HGPA'
fileh = tables.open_file(hdf5_file_name, 'r+')
else:
raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
"invalid code for choice of method; "
"choose either 0, 1, 2 or 3.")
if w is None:
w = []
N_rows = e_mat.shape[0]
N_cols = e_mat.shape[1]
if method in {0, 1}:
diag_ind = np.diag_indices(N_rows)
e_mat[diag_ind] = 0
if method == 1:
scale_factor = 100.0
w_sum_before = np.sum(w)
w *= scale_factor
w = np.rint(w)
with open(file_name, 'w') as file:
print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))
if method == 0:
sz = float(np.sum(e_mat[:] > 0)) / 2
if int(sz) == 0:
return 'DO_NOT_PROCESS'
else:
file.write('{} {} 1\n'.format(N_rows, int(sz)))
elif method == 1:
chunks_size = get_chunk_size(N_cols, 2)
N_chunks, remainder = divmod(N_rows, chunks_size)
if N_chunks == 0:
sz = float(np.sum(e_mat[:] > 0)) / 2
else:
sz = 0
for i in range(N_chunks):
M = e_mat[i*chunks_size:(i+1)*chunks_size]
sz += float(np.sum(M > 0))
if remainder != 0:
M = e_mat[N_chunks*chunks_size:N_rows]
sz += float(np.sum(M > 0))
sz = float(sz) / 2
file.write('{} {} 11\n'.format(N_rows, int(sz)))
else:
file.write('{} {} 1\n'.format(N_cols, N_rows))
if method in {0, 1}:
chunks_size = get_chunk_size(N_cols, 2)
for i in range(0, N_rows, chunks_size):
M = e_mat[i:min(i+chunks_size, N_rows)]
for j in range(M.shape[0]):
edges = np.where(M[j] > 0)[0]
weights = M[j, edges]
if method == 0:
interlaced = np.zeros(2 * edges.size, dtype = int)
# METIS and hMETIS have vertices numbering starting from 1:
interlaced[::2] = edges + 1
interlaced[1::2] = weights
else:
interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
interlaced[0] = w[i + j]
# METIS and hMETIS have vertices numbering starting from 1:
interlaced[1::2] = edges + 1
interlaced[2::2] = weights
for elt in interlaced:
file.write('{} '.format(int(elt)))
file.write('\n')
else:
print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
"non-zero hyper-edges.".format(**locals()))
chunks_size = get_chunk_size(N_rows, 2)
for i in range(0, N_cols, chunks_size):
M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
for j in range(M.shape[1]):
edges = np.where(M[:, j] > 0)[0]
if method == 2:
weight = np.array(M[:, j].sum(), dtype = int)
else:
weight = w[i + j]
# METIS and hMETIS require vertices numbering starting from 1:
interlaced = np.append(weight, edges + 1)
for elt in interlaced:
file.write('{} '.format(int(elt)))
file.write('\n')
if method in {0, 1}:
fileh.remove_node(fileh.root.consensus_group, e_mat.name)
fileh.close()
print('#')
return file_name | [
"def",
"wgraph",
"(",
"hdf5_file_name",
",",
"w",
"=",
"None",
",",
"method",
"=",
"0",
")",
":",
"print",
"(",
"'\\n#'",
")",
"if",
"method",
"==",
"0",
":",
"fileh",
"=",
"tables",
".",
"open_file",
"(",
"hdf5_file_name",
",",
"'r+'",
")",
"e_mat",
"=",
"fileh",
".",
"root",
".",
"consensus_group",
".",
"similarities_CSPA",
"file_name",
"=",
"'wgraph_CSPA'",
"elif",
"method",
"==",
"1",
":",
"fileh",
"=",
"tables",
".",
"open_file",
"(",
"hdf5_file_name",
",",
"'r+'",
")",
"e_mat",
"=",
"fileh",
".",
"root",
".",
"consensus_group",
".",
"similarities_MCLA",
"file_name",
"=",
"'wgraph_MCLA'",
"elif",
"method",
"in",
"{",
"2",
",",
"3",
"}",
":",
"hypergraph_adjacency",
"=",
"load_hypergraph_adjacency",
"(",
"hdf5_file_name",
")",
"e_mat",
"=",
"hypergraph_adjacency",
".",
"copy",
"(",
")",
".",
"transpose",
"(",
")",
"file_name",
"=",
"'wgraph_HGPA'",
"fileh",
"=",
"tables",
".",
"open_file",
"(",
"hdf5_file_name",
",",
"'r+'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"\\nERROR: Cluster_Ensembles: wgraph: \"",
"\"invalid code for choice of method; \"",
"\"choose either 0, 1, 2 or 3.\"",
")",
"if",
"w",
"is",
"None",
":",
"w",
"=",
"[",
"]",
"N_rows",
"=",
"e_mat",
".",
"shape",
"[",
"0",
"]",
"N_cols",
"=",
"e_mat",
".",
"shape",
"[",
"1",
"]",
"if",
"method",
"in",
"{",
"0",
",",
"1",
"}",
":",
"diag_ind",
"=",
"np",
".",
"diag_indices",
"(",
"N_rows",
")",
"e_mat",
"[",
"diag_ind",
"]",
"=",
"0",
"if",
"method",
"==",
"1",
":",
"scale_factor",
"=",
"100.0",
"w_sum_before",
"=",
"np",
".",
"sum",
"(",
"w",
")",
"w",
"*=",
"scale_factor",
"w",
"=",
"np",
".",
"rint",
"(",
"w",
")",
"with",
"open",
"(",
"file_name",
",",
"'w'",
")",
"as",
"file",
":",
"print",
"(",
"\"INFO: Cluster_Ensembles: wgraph: writing {}.\"",
".",
"format",
"(",
"file_name",
")",
")",
"if",
"method",
"==",
"0",
":",
"sz",
"=",
"float",
"(",
"np",
".",
"sum",
"(",
"e_mat",
"[",
":",
"]",
">",
"0",
")",
")",
"/",
"2",
"if",
"int",
"(",
"sz",
")",
"==",
"0",
":",
"return",
"'DO_NOT_PROCESS'",
"else",
":",
"file",
".",
"write",
"(",
"'{} {} 1\\n'",
".",
"format",
"(",
"N_rows",
",",
"int",
"(",
"sz",
")",
")",
")",
"elif",
"method",
"==",
"1",
":",
"chunks_size",
"=",
"get_chunk_size",
"(",
"N_cols",
",",
"2",
")",
"N_chunks",
",",
"remainder",
"=",
"divmod",
"(",
"N_rows",
",",
"chunks_size",
")",
"if",
"N_chunks",
"==",
"0",
":",
"sz",
"=",
"float",
"(",
"np",
".",
"sum",
"(",
"e_mat",
"[",
":",
"]",
">",
"0",
")",
")",
"/",
"2",
"else",
":",
"sz",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"N_chunks",
")",
":",
"M",
"=",
"e_mat",
"[",
"i",
"*",
"chunks_size",
":",
"(",
"i",
"+",
"1",
")",
"*",
"chunks_size",
"]",
"sz",
"+=",
"float",
"(",
"np",
".",
"sum",
"(",
"M",
">",
"0",
")",
")",
"if",
"remainder",
"!=",
"0",
":",
"M",
"=",
"e_mat",
"[",
"N_chunks",
"*",
"chunks_size",
":",
"N_rows",
"]",
"sz",
"+=",
"float",
"(",
"np",
".",
"sum",
"(",
"M",
">",
"0",
")",
")",
"sz",
"=",
"float",
"(",
"sz",
")",
"/",
"2",
"file",
".",
"write",
"(",
"'{} {} 11\\n'",
".",
"format",
"(",
"N_rows",
",",
"int",
"(",
"sz",
")",
")",
")",
"else",
":",
"file",
".",
"write",
"(",
"'{} {} 1\\n'",
".",
"format",
"(",
"N_cols",
",",
"N_rows",
")",
")",
"if",
"method",
"in",
"{",
"0",
",",
"1",
"}",
":",
"chunks_size",
"=",
"get_chunk_size",
"(",
"N_cols",
",",
"2",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"N_rows",
",",
"chunks_size",
")",
":",
"M",
"=",
"e_mat",
"[",
"i",
":",
"min",
"(",
"i",
"+",
"chunks_size",
",",
"N_rows",
")",
"]",
"for",
"j",
"in",
"range",
"(",
"M",
".",
"shape",
"[",
"0",
"]",
")",
":",
"edges",
"=",
"np",
".",
"where",
"(",
"M",
"[",
"j",
"]",
">",
"0",
")",
"[",
"0",
"]",
"weights",
"=",
"M",
"[",
"j",
",",
"edges",
"]",
"if",
"method",
"==",
"0",
":",
"interlaced",
"=",
"np",
".",
"zeros",
"(",
"2",
"*",
"edges",
".",
"size",
",",
"dtype",
"=",
"int",
")",
"# METIS and hMETIS have vertices numbering starting from 1:",
"interlaced",
"[",
":",
":",
"2",
"]",
"=",
"edges",
"+",
"1",
"interlaced",
"[",
"1",
":",
":",
"2",
"]",
"=",
"weights",
"else",
":",
"interlaced",
"=",
"np",
".",
"zeros",
"(",
"1",
"+",
"2",
"*",
"edges",
".",
"size",
",",
"dtype",
"=",
"int",
")",
"interlaced",
"[",
"0",
"]",
"=",
"w",
"[",
"i",
"+",
"j",
"]",
"# METIS and hMETIS have vertices numbering starting from 1:",
"interlaced",
"[",
"1",
":",
":",
"2",
"]",
"=",
"edges",
"+",
"1",
"interlaced",
"[",
"2",
":",
":",
"2",
"]",
"=",
"weights",
"for",
"elt",
"in",
"interlaced",
":",
"file",
".",
"write",
"(",
"'{} '",
".",
"format",
"(",
"int",
"(",
"elt",
")",
")",
")",
"file",
".",
"write",
"(",
"'\\n'",
")",
"else",
":",
"print",
"(",
"\"INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} \"",
"\"non-zero hyper-edges.\"",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
")",
"chunks_size",
"=",
"get_chunk_size",
"(",
"N_rows",
",",
"2",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"N_cols",
",",
"chunks_size",
")",
":",
"M",
"=",
"np",
".",
"asarray",
"(",
"e_mat",
"[",
":",
",",
"i",
":",
"min",
"(",
"i",
"+",
"chunks_size",
",",
"N_cols",
")",
"]",
".",
"todense",
"(",
")",
")",
"for",
"j",
"in",
"range",
"(",
"M",
".",
"shape",
"[",
"1",
"]",
")",
":",
"edges",
"=",
"np",
".",
"where",
"(",
"M",
"[",
":",
",",
"j",
"]",
">",
"0",
")",
"[",
"0",
"]",
"if",
"method",
"==",
"2",
":",
"weight",
"=",
"np",
".",
"array",
"(",
"M",
"[",
":",
",",
"j",
"]",
".",
"sum",
"(",
")",
",",
"dtype",
"=",
"int",
")",
"else",
":",
"weight",
"=",
"w",
"[",
"i",
"+",
"j",
"]",
"# METIS and hMETIS require vertices numbering starting from 1:",
"interlaced",
"=",
"np",
".",
"append",
"(",
"weight",
",",
"edges",
"+",
"1",
")",
"for",
"elt",
"in",
"interlaced",
":",
"file",
".",
"write",
"(",
"'{} '",
".",
"format",
"(",
"int",
"(",
"elt",
")",
")",
")",
"file",
".",
"write",
"(",
"'\\n'",
")",
"if",
"method",
"in",
"{",
"0",
",",
"1",
"}",
":",
"fileh",
".",
"remove_node",
"(",
"fileh",
".",
"root",
".",
"consensus_group",
",",
"e_mat",
".",
"name",
")",
"fileh",
".",
"close",
"(",
")",
"print",
"(",
"'#'",
")",
"return",
"file_name"
] | Write a graph file in a format apposite to later use by METIS or HMETIS.
Parameters
----------
hdf5_file_name : file handle or string
w : list or array, optional (default = None)
method : int, optional (default = 0)
Returns
-------
file_name : string | [
"Write",
"a",
"graph",
"file",
"in",
"a",
"format",
"apposite",
"to",
"later",
"use",
"by",
"METIS",
"or",
"HMETIS",
".",
"Parameters",
"----------",
"hdf5_file_name",
":",
"file",
"handle",
"or",
"string",
"w",
":",
"list",
"or",
"array",
"optional",
"(",
"default",
"=",
"None",
")",
"method",
":",
"int",
"optional",
"(",
"default",
"=",
"0",
")",
"Returns",
"-------",
"file_name",
":",
"string"
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L1024-L1154 |
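The file that wgraph() writes for method 0 follows the plain METIS graph format: a header line "N_vertices N_edges 1", then one line per vertex listing its 1-indexed neighbours interleaved with the corresponding edge weights. The standalone toy below reproduces that layout without touching the package or any HDF5 store:

# Standalone illustration of the METIS graph layout produced by wgraph() for
# method 0 (CSPA): header "N_vertices N_edges 1", then per-vertex adjacency
# with 1-indexed neighbours interleaved with integer edge weights.
import numpy as np

similarity = np.array([[0, 3, 0],
                       [3, 0, 2],
                       [0, 2, 0]])           # toy symmetric similarity matrix, zeroed diagonal

n_vertices = similarity.shape[0]
n_edges = int(np.sum(similarity > 0) / 2)    # each undirected edge counted once
with open('toy_wgraph', 'w') as handle:
    handle.write('{} {} 1\n'.format(n_vertices, n_edges))
    for row in similarity:
        neighbours = np.where(row > 0)[0]
        interlaced = np.empty(2 * neighbours.size, dtype=int)
        interlaced[::2] = neighbours + 1     # METIS numbers vertices from 1
        interlaced[1::2] = row[neighbours]
        handle.write(' '.join(str(v) for v in interlaced) + '\n')
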
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | sgraph | def sgraph(N_clusters_max, file_name):
"""Runs METIS or hMETIS and returns the labels found by those
(hyper-)graph partitioning algorithms.
Parameters
----------
N_clusters_max : int
file_name : string
Returns
-------
labels : array of shape (n_samples,)
A vector of labels denoting the cluster to which each sample has been assigned
as a result of any of three approximation algorithms for consensus clustering
(either of CSPA, HGPA or MCLA).
"""
if file_name == 'DO_NOT_PROCESS':
return []
print('\n#')
k = str(N_clusters_max)
out_name = file_name + '.part.' + k
if file_name == 'wgraph_HGPA':
print("INFO: Cluster_Ensembles: sgraph: "
"calling shmetis for hypergraph partitioning.")
if sys.platform.startswith('linux'):
shmetis_path = pkg_resources.resource_filename(__name__,
'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
elif sys.platform.startswith('darwin'):
shmetis_path = pkg_resources.resource_filename(__name__,
'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
else:
print("ERROR: Cluster_Ensembles: sgraph:\n"
"your platform is not supported. Some code required for graph partition "
"is only available for Linux distributions and OS X.")
sys.exit(1)
args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
subprocess.call(args, shell = True)
elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
print("INFO: Cluster_Ensembles: sgraph: "
"calling gpmetis for graph partitioning.")
args = "gpmetis ./" + file_name + " " + k
subprocess.call(args, shell = True)
else:
raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
"file-name.".format(file_name))
labels = np.empty(0, dtype = int)
with open(out_name, 'r') as file:
print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
"loading {}".format(out_name))
labels = np.loadtxt(out_name, dtype = int)
labels = labels.reshape(labels.size)
labels = one_to_max(labels)
subprocess.call(['rm', out_name])
print('#')
return labels | python | def sgraph(N_clusters_max, file_name):
"""Runs METIS or hMETIS and returns the labels found by those
(hyper-)graph partitioning algorithms.
Parameters
----------
N_clusters_max : int
file_name : string
Returns
-------
labels : array of shape (n_samples,)
A vector of labels denoting the cluster to which each sample has been assigned
as a result of any of three approximation algorithms for consensus clustering
(either of CSPA, HGPA or MCLA).
"""
if file_name == 'DO_NOT_PROCESS':
return []
print('\n#')
k = str(N_clusters_max)
out_name = file_name + '.part.' + k
if file_name == 'wgraph_HGPA':
print("INFO: Cluster_Ensembles: sgraph: "
"calling shmetis for hypergraph partitioning.")
if sys.platform.startswith('linux'):
shmetis_path = pkg_resources.resource_filename(__name__,
'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
elif sys.platform.startswith('darwin'):
shmetis_path = pkg_resources.resource_filename(__name__,
'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
else:
print("ERROR: Cluster_Ensembles: sgraph:\n"
"your platform is not supported. Some code required for graph partition "
"is only available for Linux distributions and OS X.")
sys.exit(1)
args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
subprocess.call(args, shell = True)
elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
print("INFO: Cluster_Ensembles: sgraph: "
"calling gpmetis for graph partitioning.")
args = "gpmetis ./" + file_name + " " + k
subprocess.call(args, shell = True)
else:
raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
"file-name.".format(file_name))
labels = np.empty(0, dtype = int)
with open(out_name, 'r') as file:
print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
"loading {}".format(out_name))
labels = np.loadtxt(out_name, dtype = int)
labels = labels.reshape(labels.size)
labels = one_to_max(labels)
subprocess.call(['rm', out_name])
print('#')
return labels | [
"def",
"sgraph",
"(",
"N_clusters_max",
",",
"file_name",
")",
":",
"if",
"file_name",
"==",
"'DO_NOT_PROCESS'",
":",
"return",
"[",
"]",
"print",
"(",
"'\\n#'",
")",
"k",
"=",
"str",
"(",
"N_clusters_max",
")",
"out_name",
"=",
"file_name",
"+",
"'.part.'",
"+",
"k",
"if",
"file_name",
"==",
"'wgraph_HGPA'",
":",
"print",
"(",
"\"INFO: Cluster_Ensembles: sgraph: \"",
"\"calling shmetis for hypergraph partitioning.\"",
")",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'linux'",
")",
":",
"shmetis_path",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"__name__",
",",
"'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis'",
")",
"elif",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'darwin'",
")",
":",
"shmetis_path",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"__name__",
",",
"'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis'",
")",
"else",
":",
"print",
"(",
"\"ERROR: Cluster_Ensembles: sgraph:\\n\"",
"\"your platform is not supported. Some code required for graph partition \"",
"\"is only available for Linux distributions and OS X.\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"args",
"=",
"\"{0} ./\"",
".",
"format",
"(",
"shmetis_path",
")",
"+",
"file_name",
"+",
"\" \"",
"+",
"k",
"+",
"\" 15\"",
"subprocess",
".",
"call",
"(",
"args",
",",
"shell",
"=",
"True",
")",
"elif",
"file_name",
"==",
"'wgraph_CSPA'",
"or",
"file_name",
"==",
"'wgraph_MCLA'",
":",
"print",
"(",
"\"INFO: Cluster_Ensembles: sgraph: \"",
"\"calling gpmetis for graph partitioning.\"",
")",
"args",
"=",
"\"gpmetis ./\"",
"+",
"file_name",
"+",
"\" \"",
"+",
"k",
"subprocess",
".",
"call",
"(",
"args",
",",
"shell",
"=",
"True",
")",
"else",
":",
"raise",
"NameError",
"(",
"\"ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable \"",
"\"file-name.\"",
".",
"format",
"(",
"file_name",
")",
")",
"labels",
"=",
"np",
".",
"empty",
"(",
"0",
",",
"dtype",
"=",
"int",
")",
"with",
"open",
"(",
"out_name",
",",
"'r'",
")",
"as",
"file",
":",
"print",
"(",
"\"INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; \"",
"\"loading {}\"",
".",
"format",
"(",
"out_name",
")",
")",
"labels",
"=",
"np",
".",
"loadtxt",
"(",
"out_name",
",",
"dtype",
"=",
"int",
")",
"labels",
"=",
"labels",
".",
"reshape",
"(",
"labels",
".",
"size",
")",
"labels",
"=",
"one_to_max",
"(",
"labels",
")",
"subprocess",
".",
"call",
"(",
"[",
"'rm'",
",",
"out_name",
"]",
")",
"print",
"(",
"'#'",
")",
"return",
"labels"
] | Runs METIS or hMETIS and returns the labels found by those
(hyper-)graph partitioning algorithms.
Parameters
----------
N_clusters_max : int
file_name : string
Returns
-------
labels : array of shape (n_samples,)
A vector of labels denoting the cluster to which each sample has been assigned
as a result of any of three approximation algorithms for consensus clustering
(either of CSPA, HGPA or MCLA). | [
"Runs",
"METIS",
"or",
"hMETIS",
"and",
"returns",
"the",
"labels",
"found",
"by",
"those",
"(",
"hyper",
"-",
")",
"graph",
"partitioning",
"algorithms",
".",
"Parameters",
"----------",
"N_clusters_max",
":",
"int",
"file_name",
":",
"string",
"Returns",
"-------",
"labels",
":",
"array",
"of",
"shape",
"(",
"n_samples",
")",
"A",
"vector",
"of",
"labels",
"denoting",
"the",
"cluster",
"to",
"which",
"each",
"sample",
"has",
"been",
"assigned",
"as",
"a",
"result",
"of",
"any",
"of",
"three",
"approximation",
"algorithms",
"for",
"consensus",
"clustering",
"(",
"either",
"of",
"CSPA",
"HGPA",
"or",
"MCLA",
")",
"."
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L1157-L1221 |
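sgraph() boils down to shelling out to gpmetis (or shmetis for the hypergraph case) and loading the "<graph>.part.<k>" file the partitioner writes back. A standalone sketch of that call-and-parse pattern, assuming the METIS command-line tools are installed and on the PATH:

# Sketch of the gpmetis call-and-parse pattern used by sgraph(). A tiny
# unweighted METIS graph (a 4-cycle) is written first so the snippet stands alone.
import subprocess
import numpy as np

with open('toy_graph', 'w') as handle:
    handle.write('4 4\n')     # 4 vertices, 4 edges
    handle.write('2 4\n')     # neighbours of vertex 1
    handle.write('1 3\n')     # neighbours of vertex 2
    handle.write('2 4\n')     # neighbours of vertex 3
    handle.write('1 3\n')     # neighbours of vertex 4

k = 2
subprocess.call('gpmetis toy_graph {}'.format(k), shell=True)    # requires METIS on the PATH
labels = np.loadtxt('toy_graph.part.{}'.format(k), dtype=int)    # one partition ID per vertex
print(labels)
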
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | overlap_matrix | def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
"""Writes on disk (in an HDF5 file whose handle is provided as the first
argument to this function) a stack of matrices, each describing
for a particular run the overlap of cluster ID's that are matching
each of the cluster ID's stored in 'consensus_labels'
(the vector of labels obtained by ensemble clustering).
Returns also the adjacency matrix for consensus clustering
and a vector of mutual informations between each of the clusterings
from the ensemble and their consensus.
Parameters
----------
hdf5_file_name : file handle or string
consensus_labels : array of shape (n_samples,)
cluster_runs : array of shape (n_partitions, n_samples)
Returns
-------
cluster_dims_list :
mutual_info_list :
consensus_adjacency :
"""
if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
cluster_runs = cluster_runs.reshape(1, -1)
N_runs, N_samples = cluster_runs.shape
N_consensus_labels = np.unique(consensus_labels).size
indices_consensus_adjacency = np.empty(0, dtype = np.int32)
indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)
for k in range(N_consensus_labels):
indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)
data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)
consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
shape = (N_consensus_labels, N_samples))
fileh = tables.open_file(hdf5_file_name, 'r+')
FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)
overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
tables.Float32Atom(), (0, N_consensus_labels),
"Matrix of overlaps between each run and "
"the consensus labellings", filters = FILTERS,
expectedrows = N_consensus_labels * N_runs)
mutual_info_list = []
cluster_dims_list = [0]
for i in range(N_runs):
M = cluster_runs[i]
mutual_info_list.append(ceEvalMutual(M, consensus_labels))
finite_indices = np.where(np.isfinite(M))[0]
positive_indices = np.where(M >= 0)[0]
selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
cluster_ids = np.unique(M[selected_indices])
n_ids = cluster_ids.size
cluster_dims_list.append(n_ids)
unions = np.zeros((n_ids, N_consensus_labels), dtype = float)
indices = np.empty(0, dtype = int)
indptr = [0]
c = 0
for elt in cluster_ids:
indices = np.append(indices, np.where(M == elt)[0])
indptr.append(indices.size)
for k in range(N_consensus_labels):
x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
unions[c, k] = np.union1d(indices, x).size
c += 1
data = np.ones(indices.size, dtype = int)
I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))
intersections = I.dot(consensus_adjacency.transpose())
intersections = np.squeeze(np.asarray(intersections.todense()))
overlap_matrix.append(np.divide(intersections, unions))
fileh.close()
return cluster_dims_list, mutual_info_list, consensus_adjacency | python | def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
"""Writes on disk (in an HDF5 file whose handle is provided as the first
argument to this function) a stack of matrices, each describing
for a particular run the overlap of cluster ID's that are matching
each of the cluster ID's stored in 'consensus_labels'
(the vector of labels obtained by ensemble clustering).
Returns also the adjacency matrix for consensus clustering
and a vector of mutual informations between each of the clusterings
from the ensemble and their consensus.
Parameters
----------
hdf5_file_name : file handle or string
consensus_labels : array of shape (n_samples,)
cluster_runs : array of shape (n_partitions, n_samples)
Returns
-------
cluster_dims_list :
mutual_info_list :
consensus_adjacency :
"""
if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
cluster_runs = cluster_runs.reshape(1, -1)
N_runs, N_samples = cluster_runs.shape
N_consensus_labels = np.unique(consensus_labels).size
indices_consensus_adjacency = np.empty(0, dtype = np.int32)
indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)
for k in range(N_consensus_labels):
indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)
data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)
consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
shape = (N_consensus_labels, N_samples))
fileh = tables.open_file(hdf5_file_name, 'r+')
FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)
overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
tables.Float32Atom(), (0, N_consensus_labels),
"Matrix of overlaps between each run and "
"the consensus labellings", filters = FILTERS,
expectedrows = N_consensus_labels * N_runs)
mutual_info_list = []
cluster_dims_list = [0]
for i in range(N_runs):
M = cluster_runs[i]
mutual_info_list.append(ceEvalMutual(M, consensus_labels))
finite_indices = np.where(np.isfinite(M))[0]
positive_indices = np.where(M >= 0)[0]
selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
cluster_ids = np.unique(M[selected_indices])
n_ids = cluster_ids.size
cluster_dims_list.append(n_ids)
unions = np.zeros((n_ids, N_consensus_labels), dtype = float)
indices = np.empty(0, dtype = int)
indptr = [0]
c = 0
for elt in cluster_ids:
indices = np.append(indices, np.where(M == elt)[0])
indptr.append(indices.size)
for k in range(N_consensus_labels):
x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
unions[c, k] = np.union1d(indices, x).size
c += 1
data = np.ones(indices.size, dtype = int)
I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))
intersections = I.dot(consensus_adjacency.transpose())
intersections = np.squeeze(np.asarray(intersections.todense()))
overlap_matrix.append(np.divide(intersections, unions))
fileh.close()
return cluster_dims_list, mutual_info_list, consensus_adjacency | [
"def",
"overlap_matrix",
"(",
"hdf5_file_name",
",",
"consensus_labels",
",",
"cluster_runs",
")",
":",
"if",
"reduce",
"(",
"operator",
".",
"mul",
",",
"cluster_runs",
".",
"shape",
",",
"1",
")",
"==",
"max",
"(",
"cluster_runs",
".",
"shape",
")",
":",
"cluster_runs",
"=",
"cluster_runs",
".",
"reshape",
"(",
"1",
",",
"-",
"1",
")",
"N_runs",
",",
"N_samples",
"=",
"cluster_runs",
".",
"shape",
"N_consensus_labels",
"=",
"np",
".",
"unique",
"(",
"consensus_labels",
")",
".",
"size",
"indices_consensus_adjacency",
"=",
"np",
".",
"empty",
"(",
"0",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"indptr_consensus_adjacency",
"=",
"np",
".",
"zeros",
"(",
"1",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"for",
"k",
"in",
"range",
"(",
"N_consensus_labels",
")",
":",
"indices_consensus_adjacency",
"=",
"np",
".",
"append",
"(",
"indices_consensus_adjacency",
",",
"np",
".",
"where",
"(",
"consensus_labels",
"==",
"k",
")",
"[",
"0",
"]",
")",
"indptr_consensus_adjacency",
"=",
"np",
".",
"append",
"(",
"indptr_consensus_adjacency",
",",
"indices_consensus_adjacency",
".",
"size",
")",
"data_consensus_adjacency",
"=",
"np",
".",
"ones",
"(",
"indices_consensus_adjacency",
".",
"size",
",",
"dtype",
"=",
"int",
")",
"consensus_adjacency",
"=",
"scipy",
".",
"sparse",
".",
"csr_matrix",
"(",
"(",
"data_consensus_adjacency",
",",
"indices_consensus_adjacency",
",",
"indptr_consensus_adjacency",
")",
",",
"shape",
"=",
"(",
"N_consensus_labels",
",",
"N_samples",
")",
")",
"fileh",
"=",
"tables",
".",
"open_file",
"(",
"hdf5_file_name",
",",
"'r+'",
")",
"FILTERS",
"=",
"get_compression_filter",
"(",
"4",
"*",
"N_consensus_labels",
"*",
"N_runs",
")",
"overlap_matrix",
"=",
"fileh",
".",
"create_earray",
"(",
"fileh",
".",
"root",
".",
"consensus_group",
",",
"'overlap_matrix'",
",",
"tables",
".",
"Float32Atom",
"(",
")",
",",
"(",
"0",
",",
"N_consensus_labels",
")",
",",
"\"Matrix of overlaps between each run and \"",
"\"the consensus labellings\"",
",",
"filters",
"=",
"FILTERS",
",",
"expectedrows",
"=",
"N_consensus_labels",
"*",
"N_runs",
")",
"mutual_info_list",
"=",
"[",
"]",
"cluster_dims_list",
"=",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"N_runs",
")",
":",
"M",
"=",
"cluster_runs",
"[",
"i",
"]",
"mutual_info_list",
".",
"append",
"(",
"ceEvalMutual",
"(",
"M",
",",
"consensus_labels",
")",
")",
"finite_indices",
"=",
"np",
".",
"where",
"(",
"np",
".",
"isfinite",
"(",
"M",
")",
")",
"[",
"0",
"]",
"positive_indices",
"=",
"np",
".",
"where",
"(",
"M",
">=",
"0",
")",
"[",
"0",
"]",
"selected_indices",
"=",
"np",
".",
"intersect1d",
"(",
"finite_indices",
",",
"positive_indices",
",",
"assume_unique",
"=",
"True",
")",
"cluster_ids",
"=",
"np",
".",
"unique",
"(",
"M",
"[",
"selected_indices",
"]",
")",
"n_ids",
"=",
"cluster_ids",
".",
"size",
"cluster_dims_list",
".",
"append",
"(",
"n_ids",
")",
"unions",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_ids",
",",
"N_consensus_labels",
")",
",",
"dtype",
"=",
"float",
")",
"indices",
"=",
"np",
".",
"empty",
"(",
"0",
",",
"dtype",
"=",
"int",
")",
"indptr",
"=",
"[",
"0",
"]",
"c",
"=",
"0",
"for",
"elt",
"in",
"cluster_ids",
":",
"indices",
"=",
"np",
".",
"append",
"(",
"indices",
",",
"np",
".",
"where",
"(",
"M",
"==",
"elt",
")",
"[",
"0",
"]",
")",
"indptr",
".",
"append",
"(",
"indices",
".",
"size",
")",
"for",
"k",
"in",
"range",
"(",
"N_consensus_labels",
")",
":",
"x",
"=",
"indices_consensus_adjacency",
"[",
"indptr_consensus_adjacency",
"[",
"k",
"]",
":",
"indptr_consensus_adjacency",
"[",
"k",
"+",
"1",
"]",
"]",
"unions",
"[",
"c",
",",
"k",
"]",
"=",
"np",
".",
"union1d",
"(",
"indices",
",",
"x",
")",
".",
"size",
"c",
"+=",
"1",
"data",
"=",
"np",
".",
"ones",
"(",
"indices",
".",
"size",
",",
"dtype",
"=",
"int",
")",
"I",
"=",
"scipy",
".",
"sparse",
".",
"csr_matrix",
"(",
"(",
"data",
",",
"indices",
",",
"indptr",
")",
",",
"shape",
"=",
"(",
"n_ids",
",",
"N_samples",
")",
")",
"intersections",
"=",
"I",
".",
"dot",
"(",
"consensus_adjacency",
".",
"transpose",
"(",
")",
")",
"intersections",
"=",
"np",
".",
"squeeze",
"(",
"np",
".",
"asarray",
"(",
"intersections",
".",
"todense",
"(",
")",
")",
")",
"overlap_matrix",
".",
"append",
"(",
"np",
".",
"divide",
"(",
"intersections",
",",
"unions",
")",
")",
"fileh",
".",
"close",
"(",
")",
"return",
"cluster_dims_list",
",",
"mutual_info_list",
",",
"consensus_adjacency"
] | Writes on disk (in an HDF5 file whose handle is provided as the first
argument to this function) a stack of matrices, each describing
for a particular run the overlap of cluster ID's that are matching
each of the cluster ID's stored in 'consensus_labels'
(the vector of labels obtained by ensemble clustering).
Returns also the adjacency matrix for consensus clustering
and a vector of mutual informations between each of the clusterings
from the ensemble and their consensus.
Parameters
----------
hdf5_file_name : file handle or string
consensus_labels : array of shape (n_samples,)
cluster_runs : array of shape (n_partitions, n_samples)
Returns
-------
cluster_dims_list :
mutual_info_list :
consensus_adjacency : | [
"Writes",
"on",
"disk",
"(",
"in",
"an",
"HDF5",
"file",
"whose",
"handle",
"is",
"provided",
"as",
"the",
"first",
"argument",
"to",
"this",
"function",
")",
"a",
"stack",
"of",
"matrices",
"each",
"describing",
"for",
"a",
"particular",
"run",
"the",
"overlap",
"of",
"cluster",
"ID",
"s",
"that",
"are",
"matching",
"each",
"of",
"the",
"cluster",
"ID",
"s",
"stored",
"in",
"consensus_labels",
"(",
"the",
"vector",
"of",
"labels",
"obtained",
"by",
"ensemble",
"clustering",
")",
".",
"Returns",
"also",
"the",
"adjacency",
"matrix",
"for",
"consensus",
"clustering",
"and",
"a",
"vector",
"of",
"mutual",
"informations",
"between",
"each",
"of",
"the",
"clusterings",
"from",
"the",
"ensemble",
"and",
"their",
"consensus",
".",
"Parameters",
"----------",
"hdf5_file_name",
":",
"file",
"handle",
"or",
"string",
"consensus_labels",
":",
"array",
"of",
"shape",
"(",
"n_samples",
")",
"cluster_runs",
":",
"array",
"of",
"shape",
"(",
"n_partitions",
"n_samples",
")",
"Returns",
"-------",
"cluster_dims_list",
":",
"mutual_info_list",
":",
"consensus_adjacency",
":"
] | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L1224-L1322 |
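Stripped of the HDF5 bookkeeping, the core of overlap_matrix() is a Jaccard-style ratio: for each cluster of a single run and each consensus cluster, the size of the intersection of their member index sets divided by the size of their union. A standalone numpy sketch of that computation for one run:

# Standalone sketch of the overlap computation at the heart of overlap_matrix():
# entry (i, j) is the size of the intersection of the member index sets of run
# cluster i and consensus cluster j, divided by the size of their union.
import numpy as np

run_labels = np.array([0, 0, 1, 1, 2, 2])
consensus_labels = np.array([0, 0, 0, 1, 1, 1])

run_ids = np.unique(run_labels)
consensus_ids = np.unique(consensus_labels)
overlap = np.zeros((run_ids.size, consensus_ids.size))
for a, run_id in enumerate(run_ids):
    members_run = np.where(run_labels == run_id)[0]
    for b, consensus_id in enumerate(consensus_ids):
        members_consensus = np.where(consensus_labels == consensus_id)[0]
        intersection = np.intersect1d(members_run, members_consensus).size
        union = np.union1d(members_run, members_consensus).size
        overlap[a, b] = intersection / float(union)
print(overlap)
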
9b/google-alerts | google_alerts/__init__.py | obfuscate | def obfuscate(p, action):
"""Obfuscate the auth details to avoid easy snatching.
It's best to use a throw away account for these alerts to avoid having
your authentication put at risk by storing it locally.
"""
key = "ru7sll3uQrGtDPcIW3okutpFLo6YYtd5bWSpbZJIopYQ0Du0a1WlhvJOaZEH"
s = list()
if action == 'store':
if PY2:
for i in range(len(p)):
kc = key[i % len(key)]
ec = chr((ord(p[i]) + ord(kc)) % 256)
s.append(ec)
return base64.urlsafe_b64encode("".join(s))
else:
return base64.urlsafe_b64encode(p.encode()).decode()
else:
if PY2:
e = base64.urlsafe_b64decode(p)
for i in range(len(e)):
kc = key[i % len(key)]
dc = chr((256 + ord(e[i]) - ord(kc)) % 256)
s.append(dc)
return "".join(s)
else:
e = base64.urlsafe_b64decode(p)
return e.decode() | python | def obfuscate(p, action):
"""Obfuscate the auth details to avoid easy snatching.
It's best to use a throw away account for these alerts to avoid having
your authentication put at risk by storing it locally.
"""
key = "ru7sll3uQrGtDPcIW3okutpFLo6YYtd5bWSpbZJIopYQ0Du0a1WlhvJOaZEH"
s = list()
if action == 'store':
if PY2:
for i in range(len(p)):
kc = key[i % len(key)]
ec = chr((ord(p[i]) + ord(kc)) % 256)
s.append(ec)
return base64.urlsafe_b64encode("".join(s))
else:
return base64.urlsafe_b64encode(p.encode()).decode()
else:
if PY2:
e = base64.urlsafe_b64decode(p)
for i in range(len(e)):
kc = key[i % len(key)]
dc = chr((256 + ord(e[i]) - ord(kc)) % 256)
s.append(dc)
return "".join(s)
else:
e = base64.urlsafe_b64decode(p)
return e.decode() | [
"def",
"obfuscate",
"(",
"p",
",",
"action",
")",
":",
"key",
"=",
"\"ru7sll3uQrGtDPcIW3okutpFLo6YYtd5bWSpbZJIopYQ0Du0a1WlhvJOaZEH\"",
"s",
"=",
"list",
"(",
")",
"if",
"action",
"==",
"'store'",
":",
"if",
"PY2",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"p",
")",
")",
":",
"kc",
"=",
"key",
"[",
"i",
"%",
"len",
"(",
"key",
")",
"]",
"ec",
"=",
"chr",
"(",
"(",
"ord",
"(",
"p",
"[",
"i",
"]",
")",
"+",
"ord",
"(",
"kc",
")",
")",
"%",
"256",
")",
"s",
".",
"append",
"(",
"ec",
")",
"return",
"base64",
".",
"urlsafe_b64encode",
"(",
"\"\"",
".",
"join",
"(",
"s",
")",
")",
"else",
":",
"return",
"base64",
".",
"urlsafe_b64encode",
"(",
"p",
".",
"encode",
"(",
")",
")",
".",
"decode",
"(",
")",
"else",
":",
"if",
"PY2",
":",
"e",
"=",
"base64",
".",
"urlsafe_b64decode",
"(",
"p",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"e",
")",
")",
":",
"kc",
"=",
"key",
"[",
"i",
"%",
"len",
"(",
"key",
")",
"]",
"dc",
"=",
"chr",
"(",
"(",
"256",
"+",
"ord",
"(",
"e",
"[",
"i",
"]",
")",
"-",
"ord",
"(",
"kc",
")",
")",
"%",
"256",
")",
"s",
".",
"append",
"(",
"dc",
")",
"return",
"\"\"",
".",
"join",
"(",
"s",
")",
"else",
":",
"e",
"=",
"base64",
".",
"urlsafe_b64decode",
"(",
"p",
")",
"return",
"e",
".",
"decode",
"(",
")"
] | Obfuscate the auth details to avoid easy snatching.
It's best to use a throw away account for these alerts to avoid having
your authentication put at risk by storing it locally. | [
"Obfuscate",
"the",
"auth",
"details",
"to",
"avoid",
"easy",
"snatching",
"."
] | train | https://github.com/9b/google-alerts/blob/69a502cbcccf1bcafe963ed40d286441b0117e04/google_alerts/__init__.py#L58-L85 |
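Note that obfuscate() is reversible encoding, not encryption: on Python 3 it is plain URL-safe base64 in both directions, and on Python 2 it adds a keyed character shift before encoding. A round-trip sketch of the Python 3 branch:

# Round-trip sketch of the Python 3 branch of obfuscate(): 'store' is plain
# URL-safe base64 and 'fetch' simply decodes it back. Obfuscation, not encryption.
import base64

secret = 'hunter2'
stored = base64.urlsafe_b64encode(secret.encode()).decode()   # what obfuscate(secret, 'store') returns on Python 3
fetched = base64.urlsafe_b64decode(stored).decode()           # what obfuscate(stored, 'fetch') returns on Python 3
assert fetched == secret
print(stored)   # aHVudGVyMg==
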
9b/google-alerts | google_alerts/__init__.py | GoogleAlerts._config_bootstrap | def _config_bootstrap(self):
"""Go through and establish the defaults on the file system.
The approach here was stolen from the CLI tool provided with the
module. Idea being that the user should not always need to provide a
username and password in order to run the script. If the configuration
file is already present with valid data, then lets use it.
"""
if not os.path.exists(CONFIG_PATH):
os.makedirs(CONFIG_PATH)
if not os.path.exists(CONFIG_FILE):
json.dump(CONFIG_DEFAULTS, open(CONFIG_FILE, 'w'), indent=4,
separators=(',', ': '))
config = CONFIG_DEFAULTS
if self._email and self._password:
# Save the configuration locally to pull later on
config['email'] = self._email
config['password'] = str(obfuscate(self._password, 'store'))
self._log.debug("Caching authentication in config file")
json.dump(config, open(CONFIG_FILE, 'w'), indent=4,
separators=(',', ': '))
else:
# Load the config file and override the class
config = json.load(open(CONFIG_FILE))
if config.get('py2', PY2) != PY2:
raise Exception("Python versions have changed. Please run `setup` again to reconfigure the client.")
if config['email'] and config['password']:
self._email = config['email']
self._password = obfuscate(str(config['password']), 'fetch')
self._log.debug("Loaded authentication from config file") | python | def _config_bootstrap(self):
"""Go through and establish the defaults on the file system.
The approach here was stolen from the CLI tool provided with the
module. Idea being that the user should not always need to provide a
username and password in order to run the script. If the configuration
file is already present with valid data, then lets use it.
"""
if not os.path.exists(CONFIG_PATH):
os.makedirs(CONFIG_PATH)
if not os.path.exists(CONFIG_FILE):
json.dump(CONFIG_DEFAULTS, open(CONFIG_FILE, 'w'), indent=4,
separators=(',', ': '))
config = CONFIG_DEFAULTS
if self._email and self._password:
# Save the configuration locally to pull later on
config['email'] = self._email
config['password'] = str(obfuscate(self._password, 'store'))
self._log.debug("Caching authentication in config file")
json.dump(config, open(CONFIG_FILE, 'w'), indent=4,
separators=(',', ': '))
else:
# Load the config file and override the class
config = json.load(open(CONFIG_FILE))
if config.get('py2', PY2) != PY2:
raise Exception("Python versions have changed. Please run `setup` again to reconfigure the client.")
if config['email'] and config['password']:
self._email = config['email']
self._password = obfuscate(str(config['password']), 'fetch')
self._log.debug("Loaded authentication from config file") | [
"def",
"_config_bootstrap",
"(",
"self",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"CONFIG_PATH",
")",
":",
"os",
".",
"makedirs",
"(",
"CONFIG_PATH",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"CONFIG_FILE",
")",
":",
"json",
".",
"dump",
"(",
"CONFIG_DEFAULTS",
",",
"open",
"(",
"CONFIG_FILE",
",",
"'w'",
")",
",",
"indent",
"=",
"4",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
"config",
"=",
"CONFIG_DEFAULTS",
"if",
"self",
".",
"_email",
"and",
"self",
".",
"_password",
":",
"# Save the configuration locally to pull later on",
"config",
"[",
"'email'",
"]",
"=",
"self",
".",
"_email",
"config",
"[",
"'password'",
"]",
"=",
"str",
"(",
"obfuscate",
"(",
"self",
".",
"_password",
",",
"'store'",
")",
")",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Caching authentication in config file\"",
")",
"json",
".",
"dump",
"(",
"config",
",",
"open",
"(",
"CONFIG_FILE",
",",
"'w'",
")",
",",
"indent",
"=",
"4",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
"else",
":",
"# Load the config file and override the class",
"config",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"CONFIG_FILE",
")",
")",
"if",
"config",
".",
"get",
"(",
"'py2'",
",",
"PY2",
")",
"!=",
"PY2",
":",
"raise",
"Exception",
"(",
"\"Python versions have changed. Please run `setup` again to reconfigure the client.\"",
")",
"if",
"config",
"[",
"'email'",
"]",
"and",
"config",
"[",
"'password'",
"]",
":",
"self",
".",
"_email",
"=",
"config",
"[",
"'email'",
"]",
"self",
".",
"_password",
"=",
"obfuscate",
"(",
"str",
"(",
"config",
"[",
"'password'",
"]",
")",
",",
"'fetch'",
")",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Loaded authentication from config file\"",
")"
] | Go through and establish the defaults on the file system.
The approach here was stolen from the CLI tool provided with the
module. Idea being that the user should not always need to provide a
username and password in order to run the script. If the configuration
file is already present with valid data, then lets use it. | [
"Go",
"through",
"and",
"establish",
"the",
"defaults",
"on",
"the",
"file",
"system",
"."
] | train | https://github.com/9b/google-alerts/blob/69a502cbcccf1bcafe963ed40d286441b0117e04/google_alerts/__init__.py#L133-L162 |
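_config_bootstrap() caches credentials in a small JSON file on first use and reloads them on later runs. A sketch of that round-trip; the key names ('email', 'password', 'py2') follow the code above, while the file location is an assumption because CONFIG_PATH and CONFIG_FILE are defined elsewhere in the module:

# Sketch of the config round-trip performed by _config_bootstrap(). The path
# below is an assumed location; only the key names are taken from the code above.
import json
import os

config_file = os.path.expanduser('~/.config/google_alerts/config.json')   # assumed path
config = {'email': 'throwaway@example.com',
          'password': 'aHVudGVyMg==',   # already obfuscated, as the class stores it
          'py2': False}
os.makedirs(os.path.dirname(config_file), exist_ok=True)
json.dump(config, open(config_file, 'w'), indent=4, separators=(',', ': '))

reloaded = json.load(open(config_file))
print(reloaded['email'])
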
9b/google-alerts | google_alerts/__init__.py | GoogleAlerts._session_check | def _session_check(self):
"""Attempt to authenticate the user through a session file.
This process is done to avoid having to authenticate the user every
single time. It uses a session file that is saved when a valid session
is captured and then reused. Because sessions can expire, we need to
test the session prior to calling the user authenticated. Right now
that is done with a test string found in an unauthenticated session.
This approach is not an ideal method, but it works.
"""
if not os.path.exists(SESSION_FILE):
self._log.debug("Session file does not exist")
return False
with open(SESSION_FILE, 'rb') as f:
cookies = requests.utils.cookiejar_from_dict(pickle.load(f))
self._session.cookies = cookies
self._log.debug("Loaded cookies from session file")
response = self._session.get(url=self.TEST_URL, headers=self.HEADERS)
if self.TEST_KEY in str(response.content):
self._log.debug("Session file appears invalid")
return False
self._is_authenticated = True
self._process_state()
return True | python | def _session_check(self):
"""Attempt to authenticate the user through a session file.
This process is done to avoid having to authenticate the user every
single time. It uses a session file that is saved when a valid session
is captured and then reused. Because sessions can expire, we need to
test the session prior to calling the user authenticated. Right now
that is done with a test string found in an unauthenticated session.
This approach is not an ideal method, but it works.
"""
if not os.path.exists(SESSION_FILE):
self._log.debug("Session file does not exist")
return False
with open(SESSION_FILE, 'rb') as f:
cookies = requests.utils.cookiejar_from_dict(pickle.load(f))
self._session.cookies = cookies
self._log.debug("Loaded cookies from session file")
response = self._session.get(url=self.TEST_URL, headers=self.HEADERS)
if self.TEST_KEY in str(response.content):
self._log.debug("Session file appears invalid")
return False
self._is_authenticated = True
self._process_state()
return True | [
"def",
"_session_check",
"(",
"self",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"SESSION_FILE",
")",
":",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Session file does not exist\"",
")",
"return",
"False",
"with",
"open",
"(",
"SESSION_FILE",
",",
"'rb'",
")",
"as",
"f",
":",
"cookies",
"=",
"requests",
".",
"utils",
".",
"cookiejar_from_dict",
"(",
"pickle",
".",
"load",
"(",
"f",
")",
")",
"self",
".",
"_session",
".",
"cookies",
"=",
"cookies",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Loaded cookies from session file\"",
")",
"response",
"=",
"self",
".",
"_session",
".",
"get",
"(",
"url",
"=",
"self",
".",
"TEST_URL",
",",
"headers",
"=",
"self",
".",
"HEADERS",
")",
"if",
"self",
".",
"TEST_KEY",
"in",
"str",
"(",
"response",
".",
"content",
")",
":",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Session file appears invalid\"",
")",
"return",
"False",
"self",
".",
"_is_authenticated",
"=",
"True",
"self",
".",
"_process_state",
"(",
")",
"return",
"True"
] | Attempt to authenticate the user through a session file.
This process is done to avoid having to authenticate the user every
single time. It uses a session file that is saved when a valid session
is captured and then reused. Because sessions can expire, we need to
test the session prior to calling the user authenticated. Right now
that is done with a test string found in an unauthenticated session.
This approach is not an ideal method, but it works. | [
"Attempt",
"to",
"authenticate",
"the",
"user",
"through",
"a",
"session",
"file",
"."
] | train | https://github.com/9b/google-alerts/blob/69a502cbcccf1bcafe963ed40d286441b0117e04/google_alerts/__init__.py#L164-L187 |
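_session_check() rebuilds a requests session from a pickled cookie dictionary and probes a test URL before trusting it. The save/restore half of that pattern, sketched standalone (requests must be installed; the cookie-setting URL is just a public placeholder):

# Standalone sketch of the cookie-jar persistence that _session_check() relies on:
# dump the session cookies with pickle, then rebuild a fresh session from them.
import pickle
import requests

session = requests.Session()
session.get('https://httpbin.org/cookies/set?token=abc')   # placeholder endpoint that sets a cookie
with open('cached_session.pkl', 'wb') as handle:
    pickle.dump(requests.utils.dict_from_cookiejar(session.cookies), handle, protocol=2)

restored = requests.Session()
with open('cached_session.pkl', 'rb') as handle:
    restored.cookies = requests.utils.cookiejar_from_dict(pickle.load(handle))
print(restored.cookies.get('token'))   # 'abc' if the cookie survived the round trip
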
9b/google-alerts | google_alerts/__init__.py | GoogleAlerts._logger | def _logger(self):
"""Create a logger to be used between processes.
:returns: Logging instance.
"""
logger = logging.getLogger(self.NAME)
logger.setLevel(self.LOG_LEVEL)
shandler = logging.StreamHandler(sys.stdout)
fmt = '\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():'
fmt += '%(lineno)d %(asctime)s\033[0m| %(message)s'
shandler.setFormatter(logging.Formatter(fmt))
logger.addHandler(shandler)
return logger | python | def _logger(self):
"""Create a logger to be used between processes.
:returns: Logging instance.
"""
logger = logging.getLogger(self.NAME)
logger.setLevel(self.LOG_LEVEL)
shandler = logging.StreamHandler(sys.stdout)
fmt = '\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():'
fmt += '%(lineno)d %(asctime)s\033[0m| %(message)s'
shandler.setFormatter(logging.Formatter(fmt))
logger.addHandler(shandler)
return logger | [
"def",
"_logger",
"(",
"self",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"self",
".",
"NAME",
")",
"logger",
".",
"setLevel",
"(",
"self",
".",
"LOG_LEVEL",
")",
"shandler",
"=",
"logging",
".",
"StreamHandler",
"(",
"sys",
".",
"stdout",
")",
"fmt",
"=",
"'\\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():'",
"fmt",
"+=",
"'%(lineno)d %(asctime)s\\033[0m| %(message)s'",
"shandler",
".",
"setFormatter",
"(",
"logging",
".",
"Formatter",
"(",
"fmt",
")",
")",
"logger",
".",
"addHandler",
"(",
"shandler",
")",
"return",
"logger"
] | Create a logger to be used between processes.
:returns: Logging instance. | [
"Create",
"a",
"logger",
"to",
"be",
"used",
"between",
"processes",
"."
] | train | https://github.com/9b/google-alerts/blob/69a502cbcccf1bcafe963ed40d286441b0117e04/google_alerts/__init__.py#L189-L201 |
9b/google-alerts | google_alerts/__init__.py | GoogleAlerts.set_log_level | def set_log_level(self, level):
"""Override the default log level of the class"""
if level == 'info':
level = logging.INFO
if level == 'debug':
level = logging.DEBUG
if level == 'error':
level = logging.ERROR
self._log.setLevel(level) | python | def set_log_level(self, level):
"""Override the default log level of the class"""
if level == 'info':
level = logging.INFO
if level == 'debug':
level = logging.DEBUG
if level == 'error':
level = logging.ERROR
self._log.setLevel(level) | [
"def",
"set_log_level",
"(",
"self",
",",
"level",
")",
":",
"if",
"level",
"==",
"'info'",
":",
"level",
"=",
"logging",
".",
"INFO",
"if",
"level",
"==",
"'debug'",
":",
"level",
"=",
"logging",
".",
"DEBUG",
"if",
"level",
"==",
"'error'",
":",
"level",
"=",
"logging",
".",
"ERROR",
"self",
".",
"_log",
".",
"setLevel",
"(",
"level",
")"
] | Override the default log level of the class | [
"Override",
"the",
"default",
"log",
"level",
"of",
"the",
"class"
] | train | https://github.com/9b/google-alerts/blob/69a502cbcccf1bcafe963ed40d286441b0117e04/google_alerts/__init__.py#L203-L211 |
9b/google-alerts | google_alerts/__init__.py | GoogleAlerts._process_state | def _process_state(self):
"""Process the application state configuration.
Google Alerts manages the account information and alert data through
some custom state configuration. Not all values have been completely
enumerated.
"""
self._log.debug("Capturing state from the request")
response = self._session.get(url=self.ALERTS_URL, headers=self.HEADERS)
soup = BeautifulSoup(response.content, "html.parser")
for i in soup.findAll('script'):
if i.text.find('window.STATE') == -1:
continue
state = json.loads(i.text[15:-1])
if state != "":
self._state = state
self._log.debug("State value set: %s" % self._state)
return self._state | python | def _process_state(self):
"""Process the application state configuration.
Google Alerts manages the account information and alert data through
some custom state configuration. Not all values have been completely
enumerated.
"""
self._log.debug("Capturing state from the request")
response = self._session.get(url=self.ALERTS_URL, headers=self.HEADERS)
soup = BeautifulSoup(response.content, "html.parser")
for i in soup.findAll('script'):
if i.text.find('window.STATE') == -1:
continue
state = json.loads(i.text[15:-1])
if state != "":
self._state = state
self._log.debug("State value set: %s" % self._state)
return self._state | [
"def",
"_process_state",
"(",
"self",
")",
":",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Capturing state from the request\"",
")",
"response",
"=",
"self",
".",
"_session",
".",
"get",
"(",
"url",
"=",
"self",
".",
"ALERTS_URL",
",",
"headers",
"=",
"self",
".",
"HEADERS",
")",
"soup",
"=",
"BeautifulSoup",
"(",
"response",
".",
"content",
",",
"\"html.parser\"",
")",
"for",
"i",
"in",
"soup",
".",
"findAll",
"(",
"'script'",
")",
":",
"if",
"i",
".",
"text",
".",
"find",
"(",
"'window.STATE'",
")",
"==",
"-",
"1",
":",
"continue",
"state",
"=",
"json",
".",
"loads",
"(",
"i",
".",
"text",
"[",
"15",
":",
"-",
"1",
"]",
")",
"if",
"state",
"!=",
"\"\"",
":",
"self",
".",
"_state",
"=",
"state",
"self",
".",
"_log",
".",
"debug",
"(",
"\"State value set: %s\"",
"%",
"self",
".",
"_state",
")",
"return",
"self",
".",
"_state"
] | Process the application state configuration.
Google Alerts manages the account information and alert data through
some custom state configuration. Not all values have been completely
enumerated. | [
"Process",
"the",
"application",
"state",
"configuration",
"."
] | train | https://github.com/9b/google-alerts/blob/69a502cbcccf1bcafe963ed40d286441b0117e04/google_alerts/__init__.py#L213-L230 |
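_process_state() works by scraping the inline "window.STATE = ...;" script from the alerts page and json-parsing the slice after the assignment; the [15:-1] slice holds because "window.STATE = " is 15 characters and the statement ends with a semicolon. The same extraction against a hardcoded page, assuming beautifulsoup4 is installed:

# Standalone sketch of the window.STATE extraction performed by _process_state(),
# run against a hardcoded page instead of a live Google Alerts response.
import json
from bs4 import BeautifulSoup

html = ('<html><script>var x = 1;</script>'
        '<script>window.STATE = [null, [null, []]];</script></html>')
soup = BeautifulSoup(html, 'html.parser')
state = None
for script in soup.findAll('script'):
    if script.text.find('window.STATE') == -1:
        continue
    state = json.loads(script.text[15:-1])   # strip 'window.STATE = ' and the trailing ';'
print(state)   # [None, [None, []]]
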
9b/google-alerts | google_alerts/__init__.py | GoogleAlerts.authenticate | def authenticate(self):
"""Authenticate the user and setup our state."""
valid = self._session_check()
if self._is_authenticated and valid:
self._log.debug("[!] User has already authenticated")
return
init = self._session.get(url=self.LOGIN_URL, headers=self.HEADERS)
soup = BeautifulSoup(init.content, "html.parser")
soup_login = soup.find('form').find_all('input')
post_data = dict()
for u in soup_login:
if u.has_attr('name') and u.has_attr('value'):
post_data[u['name']] = u['value']
post_data['Email'] = self._email
post_data['Passwd'] = self._password
response = self._session.post(url=self.AUTH_URL, data=post_data,
headers=self.HEADERS)
if self.CAPTCHA_KEY in str(response.content):
raise AccountCaptcha('Google is forcing a CAPTCHA. To get around this issue, run the google-alerts with the seed option to open an interactive authentication session. Once authenticated, this module will cache your session and load that in the future')
cookies = [x.name for x in response.cookies]
if 'SIDCC' not in cookies:
raise InvalidCredentials("Email or password was incorrect.")
with open(SESSION_FILE, 'wb') as f:
cookies = requests.utils.dict_from_cookiejar(self._session.cookies)
pickle.dump(cookies, f, protocol=2)
self._log.debug("Saved session to disk for future reference")
self._log.debug("User successfully authenticated")
self._is_authenticated = True
self._process_state()
return | python | def authenticate(self):
"""Authenticate the user and setup our state."""
valid = self._session_check()
if self._is_authenticated and valid:
self._log.debug("[!] User has already authenticated")
return
init = self._session.get(url=self.LOGIN_URL, headers=self.HEADERS)
soup = BeautifulSoup(init.content, "html.parser")
soup_login = soup.find('form').find_all('input')
post_data = dict()
for u in soup_login:
if u.has_attr('name') and u.has_attr('value'):
post_data[u['name']] = u['value']
post_data['Email'] = self._email
post_data['Passwd'] = self._password
response = self._session.post(url=self.AUTH_URL, data=post_data,
headers=self.HEADERS)
if self.CAPTCHA_KEY in str(response.content):
raise AccountCaptcha('Google is forcing a CAPTCHA. To get around this issue, run the google-alerts with the seed option to open an interactive authentication session. Once authenticated, this module will cache your session and load that in the future')
cookies = [x.name for x in response.cookies]
if 'SIDCC' not in cookies:
raise InvalidCredentials("Email or password was incorrect.")
with open(SESSION_FILE, 'wb') as f:
cookies = requests.utils.dict_from_cookiejar(self._session.cookies)
pickle.dump(cookies, f, protocol=2)
self._log.debug("Saved session to disk for future reference")
self._log.debug("User successfully authenticated")
self._is_authenticated = True
self._process_state()
return | [
"def",
"authenticate",
"(",
"self",
")",
":",
"valid",
"=",
"self",
".",
"_session_check",
"(",
")",
"if",
"self",
".",
"_is_authenticated",
"and",
"valid",
":",
"self",
".",
"_log",
".",
"debug",
"(",
"\"[!] User has already authenticated\"",
")",
"return",
"init",
"=",
"self",
".",
"_session",
".",
"get",
"(",
"url",
"=",
"self",
".",
"LOGIN_URL",
",",
"headers",
"=",
"self",
".",
"HEADERS",
")",
"soup",
"=",
"BeautifulSoup",
"(",
"init",
".",
"content",
",",
"\"html.parser\"",
")",
"soup_login",
"=",
"soup",
".",
"find",
"(",
"'form'",
")",
".",
"find_all",
"(",
"'input'",
")",
"post_data",
"=",
"dict",
"(",
")",
"for",
"u",
"in",
"soup_login",
":",
"if",
"u",
".",
"has_attr",
"(",
"'name'",
")",
"and",
"u",
".",
"has_attr",
"(",
"'value'",
")",
":",
"post_data",
"[",
"u",
"[",
"'name'",
"]",
"]",
"=",
"u",
"[",
"'value'",
"]",
"post_data",
"[",
"'Email'",
"]",
"=",
"self",
".",
"_email",
"post_data",
"[",
"'Passwd'",
"]",
"=",
"self",
".",
"_password",
"response",
"=",
"self",
".",
"_session",
".",
"post",
"(",
"url",
"=",
"self",
".",
"AUTH_URL",
",",
"data",
"=",
"post_data",
",",
"headers",
"=",
"self",
".",
"HEADERS",
")",
"if",
"self",
".",
"CAPTCHA_KEY",
"in",
"str",
"(",
"response",
".",
"content",
")",
":",
"raise",
"AccountCaptcha",
"(",
"'Google is forcing a CAPTCHA. To get around this issue, run the google-alerts with the seed option to open an interactive authentication session. Once authenticated, this module will cache your session and load that in the future'",
")",
"cookies",
"=",
"[",
"x",
".",
"name",
"for",
"x",
"in",
"response",
".",
"cookies",
"]",
"if",
"'SIDCC'",
"not",
"in",
"cookies",
":",
"raise",
"InvalidCredentials",
"(",
"\"Email or password was incorrect.\"",
")",
"with",
"open",
"(",
"SESSION_FILE",
",",
"'wb'",
")",
"as",
"f",
":",
"cookies",
"=",
"requests",
".",
"utils",
".",
"dict_from_cookiejar",
"(",
"self",
".",
"_session",
".",
"cookies",
")",
"pickle",
".",
"dump",
"(",
"cookies",
",",
"f",
",",
"protocol",
"=",
"2",
")",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Saved session to disk for future reference\"",
")",
"self",
".",
"_log",
".",
"debug",
"(",
"\"User successfully authenticated\"",
")",
"self",
".",
"_is_authenticated",
"=",
"True",
"self",
".",
"_process_state",
"(",
")",
"return"
] | Authenticate the user and setup our state. | [
"Authenticate",
"the",
"user",
"and",
"setup",
"our",
"state",
"."
] | train | https://github.com/9b/google-alerts/blob/69a502cbcccf1bcafe963ed40d286441b0117e04/google_alerts/__init__.py#L277-L306 |
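A minimal usage sketch for the authenticate() record above; the credentials are placeholders, and a real run may still hit the CAPTCHA or cached-session branches described in the code.

    from google_alerts import GoogleAlerts

    ga = GoogleAlerts('user@example.com', 'example-password')  # placeholder credentials
    ga.authenticate()  # reuses a valid cached session, otherwise performs the login flow above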
9b/google-alerts | google_alerts/__init__.py | GoogleAlerts.list | def list(self, term=None):
"""List alerts configured for the account."""
if not self._state:
raise InvalidState("State was not properly obtained from the app")
self._process_state()
if not self._state[1]:
self._log.info("No monitors have been created yet.")
return list()
monitors = list()
for monitor in self._state[1][1]:
obj = dict()
obj['monitor_id'] = monitor[1]
obj['user_id'] = monitor[-1]
obj['term'] = monitor[2][3][1]
if term and obj['term'] != term:
continue
obj['language'] = monitor[2][3][3][1]
obj['region'] = monitor[2][3][3][2]
obj['delivery'] = self.DELIVERY[monitor[2][6][0][1]]
obj['match_type'] = self.MONITOR_MATCH_TYPE[monitor[2][5]]
if obj['delivery'] == 'MAIL':
obj['alert_frequency'] = self.ALERT_FREQ[monitor[2][6][0][4]]
obj['email_address'] = monitor[2][6][0][2]
else:
rss_id = monitor[2][6][0][11]
url = "https://google.com/alerts/feeds/{uid}/{fid}"
obj['rss_link'] = url.format(uid=obj['user_id'], fid=rss_id)
monitors.append(obj)
return monitors | python | def list(self, term=None):
"""List alerts configured for the account."""
if not self._state:
raise InvalidState("State was not properly obtained from the app")
self._process_state()
if not self._state[1]:
self._log.info("No monitors have been created yet.")
return list()
monitors = list()
for monitor in self._state[1][1]:
obj = dict()
obj['monitor_id'] = monitor[1]
obj['user_id'] = monitor[-1]
obj['term'] = monitor[2][3][1]
if term and obj['term'] != term:
continue
obj['language'] = monitor[2][3][3][1]
obj['region'] = monitor[2][3][3][2]
obj['delivery'] = self.DELIVERY[monitor[2][6][0][1]]
obj['match_type'] = self.MONITOR_MATCH_TYPE[monitor[2][5]]
if obj['delivery'] == 'MAIL':
obj['alert_frequency'] = self.ALERT_FREQ[monitor[2][6][0][4]]
obj['email_address'] = monitor[2][6][0][2]
else:
rss_id = monitor[2][6][0][11]
url = "https://google.com/alerts/feeds/{uid}/{fid}"
obj['rss_link'] = url.format(uid=obj['user_id'], fid=rss_id)
monitors.append(obj)
return monitors | [
"def",
"list",
"(",
"self",
",",
"term",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_state",
":",
"raise",
"InvalidState",
"(",
"\"State was not properly obtained from the app\"",
")",
"self",
".",
"_process_state",
"(",
")",
"if",
"not",
"self",
".",
"_state",
"[",
"1",
"]",
":",
"self",
".",
"_log",
".",
"info",
"(",
"\"No monitors have been created yet.\"",
")",
"return",
"list",
"(",
")",
"monitors",
"=",
"list",
"(",
")",
"for",
"monitor",
"in",
"self",
".",
"_state",
"[",
"1",
"]",
"[",
"1",
"]",
":",
"obj",
"=",
"dict",
"(",
")",
"obj",
"[",
"'monitor_id'",
"]",
"=",
"monitor",
"[",
"1",
"]",
"obj",
"[",
"'user_id'",
"]",
"=",
"monitor",
"[",
"-",
"1",
"]",
"obj",
"[",
"'term'",
"]",
"=",
"monitor",
"[",
"2",
"]",
"[",
"3",
"]",
"[",
"1",
"]",
"if",
"term",
"and",
"obj",
"[",
"'term'",
"]",
"!=",
"term",
":",
"continue",
"obj",
"[",
"'language'",
"]",
"=",
"monitor",
"[",
"2",
"]",
"[",
"3",
"]",
"[",
"3",
"]",
"[",
"1",
"]",
"obj",
"[",
"'region'",
"]",
"=",
"monitor",
"[",
"2",
"]",
"[",
"3",
"]",
"[",
"3",
"]",
"[",
"2",
"]",
"obj",
"[",
"'delivery'",
"]",
"=",
"self",
".",
"DELIVERY",
"[",
"monitor",
"[",
"2",
"]",
"[",
"6",
"]",
"[",
"0",
"]",
"[",
"1",
"]",
"]",
"obj",
"[",
"'match_type'",
"]",
"=",
"self",
".",
"MONITOR_MATCH_TYPE",
"[",
"monitor",
"[",
"2",
"]",
"[",
"5",
"]",
"]",
"if",
"obj",
"[",
"'delivery'",
"]",
"==",
"'MAIL'",
":",
"obj",
"[",
"'alert_frequency'",
"]",
"=",
"self",
".",
"ALERT_FREQ",
"[",
"monitor",
"[",
"2",
"]",
"[",
"6",
"]",
"[",
"0",
"]",
"[",
"4",
"]",
"]",
"obj",
"[",
"'email_address'",
"]",
"=",
"monitor",
"[",
"2",
"]",
"[",
"6",
"]",
"[",
"0",
"]",
"[",
"2",
"]",
"else",
":",
"rss_id",
"=",
"monitor",
"[",
"2",
"]",
"[",
"6",
"]",
"[",
"0",
"]",
"[",
"11",
"]",
"url",
"=",
"\"https://google.com/alerts/feeds/{uid}/{fid}\"",
"obj",
"[",
"'rss_link'",
"]",
"=",
"url",
".",
"format",
"(",
"uid",
"=",
"obj",
"[",
"'user_id'",
"]",
",",
"fid",
"=",
"rss_id",
")",
"monitors",
".",
"append",
"(",
"obj",
")",
"return",
"monitors"
] | List alerts configured for the account. | [
"List",
"alerts",
"configured",
"for",
"the",
"account",
"."
] | train | https://github.com/9b/google-alerts/blob/69a502cbcccf1bcafe963ed40d286441b0117e04/google_alerts/__init__.py#L308-L337 |
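An illustrative call to list(), using placeholder account details; the printed keys come directly from the dictionary built in the record above.

    from google_alerts import GoogleAlerts

    ga = GoogleAlerts('user@example.com', 'example-password')  # placeholder credentials
    ga.authenticate()
    for monitor in ga.list():
        print(monitor['monitor_id'], monitor['term'], monitor['delivery'])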
9b/google-alerts | google_alerts/__init__.py | GoogleAlerts.create | def create(self, term, options):
"""Create a monitor using passed configuration."""
if not self._state:
raise InvalidState("State was not properly obtained from the app")
options['action'] = 'CREATE'
payload = self._build_payload(term, options)
url = self.ALERTS_CREATE_URL.format(requestX=self._state[3])
self._log.debug("Creating alert using: %s" % url)
params = json.dumps(payload, separators=(',', ':'))
data = {'params': params}
response = self._session.post(url, data=data, headers=self.HEADERS)
if response.status_code != 200:
raise ActionError("Failed to create monitor: %s"
% response.content)
if options.get('exact', False):
term = "\"%s\"" % term
return self.list(term) | python | def create(self, term, options):
"""Create a monitor using passed configuration."""
if not self._state:
raise InvalidState("State was not properly obtained from the app")
options['action'] = 'CREATE'
payload = self._build_payload(term, options)
url = self.ALERTS_CREATE_URL.format(requestX=self._state[3])
self._log.debug("Creating alert using: %s" % url)
params = json.dumps(payload, separators=(',', ':'))
data = {'params': params}
response = self._session.post(url, data=data, headers=self.HEADERS)
if response.status_code != 200:
raise ActionError("Failed to create monitor: %s"
% response.content)
if options.get('exact', False):
term = "\"%s\"" % term
return self.list(term) | [
"def",
"create",
"(",
"self",
",",
"term",
",",
"options",
")",
":",
"if",
"not",
"self",
".",
"_state",
":",
"raise",
"InvalidState",
"(",
"\"State was not properly obtained from the app\"",
")",
"options",
"[",
"'action'",
"]",
"=",
"'CREATE'",
"payload",
"=",
"self",
".",
"_build_payload",
"(",
"term",
",",
"options",
")",
"url",
"=",
"self",
".",
"ALERTS_CREATE_URL",
".",
"format",
"(",
"requestX",
"=",
"self",
".",
"_state",
"[",
"3",
"]",
")",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Creating alert using: %s\"",
"%",
"url",
")",
"params",
"=",
"json",
".",
"dumps",
"(",
"payload",
",",
"separators",
"=",
"(",
"','",
",",
"':'",
")",
")",
"data",
"=",
"{",
"'params'",
":",
"params",
"}",
"response",
"=",
"self",
".",
"_session",
".",
"post",
"(",
"url",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"self",
".",
"HEADERS",
")",
"if",
"response",
".",
"status_code",
"!=",
"200",
":",
"raise",
"ActionError",
"(",
"\"Failed to create monitor: %s\"",
"%",
"response",
".",
"content",
")",
"if",
"options",
".",
"get",
"(",
"'exact'",
",",
"False",
")",
":",
"term",
"=",
"\"\\\"%s\\\"\"",
"%",
"term",
"return",
"self",
".",
"list",
"(",
"term",
")"
] | Create a monitor using passed configuration. | [
"Create",
"a",
"monitor",
"using",
"passed",
"configuration",
"."
] | train | https://github.com/9b/google-alerts/blob/69a502cbcccf1bcafe963ed40d286441b0117e04/google_alerts/__init__.py#L339-L355 |
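A hedged sketch of create(); the search term is only an example, and the option keys and upper-cased values mirror how the CLI record later in this section builds them.

    from google_alerts import GoogleAlerts

    ga = GoogleAlerts('user@example.com', 'example-password')  # placeholder credentials
    ga.authenticate()
    monitors = ga.create('example term', {
        'delivery': 'RSS',
        'alert_frequency': 'AS_IT_HAPPENS',
        'exact': False,
    })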
9b/google-alerts | google_alerts/__init__.py | GoogleAlerts.modify | def modify(self, monitor_id, options):
"""Create a monitor using passed configuration."""
if not self._state:
raise InvalidState("State was not properly obtained from the app")
monitors = self.list() # Get the latest set of monitors
obj = None
for monitor in monitors:
if monitor_id != monitor['monitor_id']:
continue
obj = monitor
if not monitor_id:
raise MonitorNotFound("No monitor was found with that term.")
options['action'] = 'MODIFY'
options.update(obj)
payload = self._build_payload(obj['term'], options)
url = self.ALERTS_MODIFY_URL.format(requestX=self._state[3])
self._log.debug("Modifying alert using: %s" % url)
params = json.dumps(payload, separators=(',', ':'))
data = {'params': params}
response = self._session.post(url, data=data, headers=self.HEADERS)
if response.status_code != 200:
raise ActionError("Failed to create monitor: %s"
% response.content)
return self.list() | python | def modify(self, monitor_id, options):
"""Create a monitor using passed configuration."""
if not self._state:
raise InvalidState("State was not properly obtained from the app")
monitors = self.list() # Get the latest set of monitors
obj = None
for monitor in monitors:
if monitor_id != monitor['monitor_id']:
continue
obj = monitor
if not monitor_id:
raise MonitorNotFound("No monitor was found with that term.")
options['action'] = 'MODIFY'
options.update(obj)
payload = self._build_payload(obj['term'], options)
url = self.ALERTS_MODIFY_URL.format(requestX=self._state[3])
self._log.debug("Modifying alert using: %s" % url)
params = json.dumps(payload, separators=(',', ':'))
data = {'params': params}
response = self._session.post(url, data=data, headers=self.HEADERS)
if response.status_code != 200:
raise ActionError("Failed to create monitor: %s"
% response.content)
return self.list() | [
"def",
"modify",
"(",
"self",
",",
"monitor_id",
",",
"options",
")",
":",
"if",
"not",
"self",
".",
"_state",
":",
"raise",
"InvalidState",
"(",
"\"State was not properly obtained from the app\"",
")",
"monitors",
"=",
"self",
".",
"list",
"(",
")",
"# Get the latest set of monitors",
"obj",
"=",
"None",
"for",
"monitor",
"in",
"monitors",
":",
"if",
"monitor_id",
"!=",
"monitor",
"[",
"'monitor_id'",
"]",
":",
"continue",
"obj",
"=",
"monitor",
"if",
"not",
"monitor_id",
":",
"raise",
"MonitorNotFound",
"(",
"\"No monitor was found with that term.\"",
")",
"options",
"[",
"'action'",
"]",
"=",
"'MODIFY'",
"options",
".",
"update",
"(",
"obj",
")",
"payload",
"=",
"self",
".",
"_build_payload",
"(",
"obj",
"[",
"'term'",
"]",
",",
"options",
")",
"url",
"=",
"self",
".",
"ALERTS_MODIFY_URL",
".",
"format",
"(",
"requestX",
"=",
"self",
".",
"_state",
"[",
"3",
"]",
")",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Modifying alert using: %s\"",
"%",
"url",
")",
"params",
"=",
"json",
".",
"dumps",
"(",
"payload",
",",
"separators",
"=",
"(",
"','",
",",
"':'",
")",
")",
"data",
"=",
"{",
"'params'",
":",
"params",
"}",
"response",
"=",
"self",
".",
"_session",
".",
"post",
"(",
"url",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"self",
".",
"HEADERS",
")",
"if",
"response",
".",
"status_code",
"!=",
"200",
":",
"raise",
"ActionError",
"(",
"\"Failed to create monitor: %s\"",
"%",
"response",
".",
"content",
")",
"return",
"self",
".",
"list",
"(",
")"
] | Modify an existing monitor using the passed configuration. | [
"Modify",
"an",
"existing",
"monitor",
"using",
"the",
"passed",
"configuration",
"."
] | train | https://github.com/9b/google-alerts/blob/69a502cbcccf1bcafe963ed40d286441b0117e04/google_alerts/__init__.py#L357-L380 |
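A sketch of modify(); the record does not spell out every option key _build_payload accepts, so the options below assume the same shape used with create().

    from google_alerts import GoogleAlerts

    ga = GoogleAlerts('user@example.com', 'example-password')  # placeholder credentials
    ga.authenticate()
    existing = ga.list()
    if existing:
        ga.modify(existing[0]['monitor_id'],
                  {'delivery': 'MAIL', 'alert_frequency': 'AT_MOST_ONCE_A_DAY'})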
9b/google-alerts | google_alerts/__init__.py | GoogleAlerts.delete | def delete(self, monitor_id):
"""Delete a monitor by ID."""
if not self._state:
raise InvalidState("State was not properly obtained from the app")
monitors = self.list() # Get the latest set of monitors
bit = None
for monitor in monitors:
if monitor_id != monitor['monitor_id']:
continue
bit = monitor['monitor_id']
if not bit:
raise MonitorNotFound("No monitor was found with that term.")
url = self.ALERTS_DELETE_URL.format(requestX=self._state[3])
self._log.debug("Deleting alert using: %s" % url)
payload = [None, monitor_id]
params = json.dumps(payload, separators=(',', ':'))
data = {'params': params}
response = self._session.post(url, data=data, headers=self.HEADERS)
if response.status_code != 200:
raise ActionError("Failed to delete by ID: %s"
% response.content)
return True | python | def delete(self, monitor_id):
"""Delete a monitor by ID."""
if not self._state:
raise InvalidState("State was not properly obtained from the app")
monitors = self.list() # Get the latest set of monitors
bit = None
for monitor in monitors:
if monitor_id != monitor['monitor_id']:
continue
bit = monitor['monitor_id']
if not bit:
raise MonitorNotFound("No monitor was found with that term.")
url = self.ALERTS_DELETE_URL.format(requestX=self._state[3])
self._log.debug("Deleting alert using: %s" % url)
payload = [None, monitor_id]
params = json.dumps(payload, separators=(',', ':'))
data = {'params': params}
response = self._session.post(url, data=data, headers=self.HEADERS)
if response.status_code != 200:
raise ActionError("Failed to delete by ID: %s"
% response.content)
return True | [
"def",
"delete",
"(",
"self",
",",
"monitor_id",
")",
":",
"if",
"not",
"self",
".",
"_state",
":",
"raise",
"InvalidState",
"(",
"\"State was not properly obtained from the app\"",
")",
"monitors",
"=",
"self",
".",
"list",
"(",
")",
"# Get the latest set of monitors",
"bit",
"=",
"None",
"for",
"monitor",
"in",
"monitors",
":",
"if",
"monitor_id",
"!=",
"monitor",
"[",
"'monitor_id'",
"]",
":",
"continue",
"bit",
"=",
"monitor",
"[",
"'monitor_id'",
"]",
"if",
"not",
"bit",
":",
"raise",
"MonitorNotFound",
"(",
"\"No monitor was found with that term.\"",
")",
"url",
"=",
"self",
".",
"ALERTS_DELETE_URL",
".",
"format",
"(",
"requestX",
"=",
"self",
".",
"_state",
"[",
"3",
"]",
")",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Deleting alert using: %s\"",
"%",
"url",
")",
"payload",
"=",
"[",
"None",
",",
"monitor_id",
"]",
"params",
"=",
"json",
".",
"dumps",
"(",
"payload",
",",
"separators",
"=",
"(",
"','",
",",
"':'",
")",
")",
"data",
"=",
"{",
"'params'",
":",
"params",
"}",
"response",
"=",
"self",
".",
"_session",
".",
"post",
"(",
"url",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"self",
".",
"HEADERS",
")",
"if",
"response",
".",
"status_code",
"!=",
"200",
":",
"raise",
"ActionError",
"(",
"\"Failed to delete by ID: %s\"",
"%",
"response",
".",
"content",
")",
"return",
"True"
] | Delete a monitor by ID. | [
"Delete",
"a",
"monitor",
"by",
"ID",
"."
] | train | https://github.com/9b/google-alerts/blob/69a502cbcccf1bcafe963ed40d286441b0117e04/google_alerts/__init__.py#L382-L403 |
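Deleting a monitor by its identifier, again with placeholder credentials; per the record above, delete() returns True on success and raises MonitorNotFound or ActionError otherwise.

    from google_alerts import GoogleAlerts

    ga = GoogleAlerts('user@example.com', 'example-password')  # placeholder credentials
    ga.authenticate()
    existing = ga.list()
    if existing:
        deleted = ga.delete(existing[0]['monitor_id'])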
9b/google-alerts | google_alerts/cli/manage.py | main | def main():
"""Run the core."""
parser = ArgumentParser()
subs = parser.add_subparsers(dest='cmd')
setup_parser = subs.add_parser('setup')
setup_parser.add_argument('-e', '--email', dest='email', required=True,
help='Email of the Google user.', type=str)
setup_parser.add_argument('-p', '--password', dest='pwd', required=True,
help='Password of the Google user.', type=str)
setup_parser = subs.add_parser('seed')
setup_parser.add_argument('-d', '--driver', dest='driver',
required=True, type=str,
help='Location of the Chrome driver. This can be downloaded by visiting http://chromedriver.chromium.org/downloads',)
setup_parser = subs.add_parser('list')
setup_parser = subs.add_parser('create')
setup_parser.add_argument('-t', '--term', dest='term', required=True,
help='Term to store.', type=str)
setup_parser.add_argument('--exact', dest='exact', action='store_true',
help='Exact matches only for term.')
setup_parser.add_argument('-d', '--delivery', dest='delivery',
required=True, choices=['rss', 'mail'],
help='Delivery method of results.')
setup_parser.add_argument('-f', '--frequency', dest='frequency',
default="realtime", choices=['realtime', 'daily', 'weekly'],
help='Frequency to send results. RSS only allows for realtime alerting.')
setup_parser = subs.add_parser('delete')
setup_parser.add_argument('--id', dest='term_id', required=True,
help='ID of the term to find for deletion.',
type=str)
args = parser.parse_args()
if args.cmd == 'setup':
if not os.path.exists(CONFIG_PATH):
os.makedirs(CONFIG_PATH)
if not os.path.exists(CONFIG_FILE):
json.dump(CONFIG_DEFAULTS, open(CONFIG_FILE, 'w'), indent=4,
separators=(',', ': '))
config = CONFIG_DEFAULTS
config['email'] = args.email
config['password'] = str(obfuscate(args.pwd, 'store'))
json.dump(config, open(CONFIG_FILE, 'w'), indent=4,
separators=(',', ': '))
config = json.load(open(CONFIG_FILE))
if config.get('py2', PY2) != PY2:
raise Exception("Python versions have changed. Please run `setup` again to reconfigure the client.")
if config['password'] == '':
raise Exception("Run setup before any other actions!")
if args.cmd == 'seed':
config['password'] = obfuscate(str(config['password']), 'fetch')
ga = GoogleAlerts(config['email'], config['password'])
with contextlib.closing(webdriver.Chrome(args.driver)) as driver:
driver.get(ga.LOGIN_URL)
wait = ui.WebDriverWait(driver, 10) # timeout after 10 seconds
inputElement = driver.find_element_by_name('Email')
inputElement.send_keys(config['email'])
inputElement.submit()
time.sleep(3)
inputElement = driver.find_element_by_id('Passwd')
inputElement.send_keys(config['password'])
inputElement.submit()
print("[!] Waiting 15 seconds for authentication to complete")
time.sleep(15)
cookies = driver.get_cookies()
collected = dict()
for cookie in cookies:
collected[str(cookie['name'])] = str(cookie['value'])
with open(SESSION_FILE, 'wb') as f:
pickle.dump(collected, f, protocol=2)
print("Session has been seeded.")
if args.cmd == 'list':
config['password'] = obfuscate(str(config['password']), 'fetch')
ga = GoogleAlerts(config['email'], config['password'])
ga.authenticate()
print(json.dumps(ga.list(), indent=4))
if args.cmd == 'create':
config['password'] = obfuscate(str(config['password']), 'fetch')
ga = GoogleAlerts(config['email'], config['password'])
ga.authenticate()
alert_frequency = 'as_it_happens'
if args.frequency == 'realtime':
alert_frequency = 'as_it_happens'
elif args.frequency == 'daily':
alert_frequency = 'at_most_once_a_day'
else:
alert_frequency = 'at_most_once_a_week'
monitor = ga.create(args.term, {'delivery': args.delivery.upper(),
'alert_frequency': alert_frequency.upper(),
'exact': args.exact})
print(json.dumps(monitor, indent=4))
if args.cmd == 'delete':
config['password'] = obfuscate(str(config['password']), 'fetch')
ga = GoogleAlerts(config['email'], config['password'])
ga.authenticate()
result = ga.delete(args.term_id)
if result:
print("%s was deleted" % args.term_id) | python | def main():
"""Run the core."""
parser = ArgumentParser()
subs = parser.add_subparsers(dest='cmd')
setup_parser = subs.add_parser('setup')
setup_parser.add_argument('-e', '--email', dest='email', required=True,
help='Email of the Google user.', type=str)
setup_parser.add_argument('-p', '--password', dest='pwd', required=True,
help='Password of the Google user.', type=str)
setup_parser = subs.add_parser('seed')
setup_parser.add_argument('-d', '--driver', dest='driver',
required=True, type=str,
help='Location of the Chrome driver. This can be downloaded by visiting http://chromedriver.chromium.org/downloads',)
setup_parser = subs.add_parser('list')
setup_parser = subs.add_parser('create')
setup_parser.add_argument('-t', '--term', dest='term', required=True,
help='Term to store.', type=str)
setup_parser.add_argument('--exact', dest='exact', action='store_true',
help='Exact matches only for term.')
setup_parser.add_argument('-d', '--delivery', dest='delivery',
required=True, choices=['rss', 'mail'],
help='Delivery method of results.')
setup_parser.add_argument('-f', '--frequency', dest='frequency',
default="realtime", choices=['realtime', 'daily', 'weekly'],
help='Frequency to send results. RSS only allows for realtime alerting.')
setup_parser = subs.add_parser('delete')
setup_parser.add_argument('--id', dest='term_id', required=True,
help='ID of the term to find for deletion.',
type=str)
args = parser.parse_args()
if args.cmd == 'setup':
if not os.path.exists(CONFIG_PATH):
os.makedirs(CONFIG_PATH)
if not os.path.exists(CONFIG_FILE):
json.dump(CONFIG_DEFAULTS, open(CONFIG_FILE, 'w'), indent=4,
separators=(',', ': '))
config = CONFIG_DEFAULTS
config['email'] = args.email
config['password'] = str(obfuscate(args.pwd, 'store'))
json.dump(config, open(CONFIG_FILE, 'w'), indent=4,
separators=(',', ': '))
config = json.load(open(CONFIG_FILE))
if config.get('py2', PY2) != PY2:
raise Exception("Python versions have changed. Please run `setup` again to reconfigure the client.")
if config['password'] == '':
raise Exception("Run setup before any other actions!")
if args.cmd == 'seed':
config['password'] = obfuscate(str(config['password']), 'fetch')
ga = GoogleAlerts(config['email'], config['password'])
with contextlib.closing(webdriver.Chrome(args.driver)) as driver:
driver.get(ga.LOGIN_URL)
wait = ui.WebDriverWait(driver, 10) # timeout after 10 seconds
inputElement = driver.find_element_by_name('Email')
inputElement.send_keys(config['email'])
inputElement.submit()
time.sleep(3)
inputElement = driver.find_element_by_id('Passwd')
inputElement.send_keys(config['password'])
inputElement.submit()
print("[!] Waiting 15 seconds for authentication to complete")
time.sleep(15)
cookies = driver.get_cookies()
collected = dict()
for cookie in cookies:
collected[str(cookie['name'])] = str(cookie['value'])
with open(SESSION_FILE, 'wb') as f:
pickle.dump(collected, f, protocol=2)
print("Session has been seeded.")
if args.cmd == 'list':
config['password'] = obfuscate(str(config['password']), 'fetch')
ga = GoogleAlerts(config['email'], config['password'])
ga.authenticate()
print(json.dumps(ga.list(), indent=4))
if args.cmd == 'create':
config['password'] = obfuscate(str(config['password']), 'fetch')
ga = GoogleAlerts(config['email'], config['password'])
ga.authenticate()
alert_frequency = 'as_it_happens'
if args.frequency == 'realtime':
alert_frequency = 'as_it_happens'
elif args.frequency == 'daily':
alert_frequency = 'at_most_once_a_day'
else:
alert_frequency = 'at_most_once_a_week'
monitor = ga.create(args.term, {'delivery': args.delivery.upper(),
'alert_frequency': alert_frequency.upper(),
'exact': args.exact})
print(json.dumps(monitor, indent=4))
if args.cmd == 'delete':
config['password'] = obfuscate(str(config['password']), 'fetch')
ga = GoogleAlerts(config['email'], config['password'])
ga.authenticate()
result = ga.delete(args.term_id)
if result:
print("%s was deleted" % args.term_id) | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"ArgumentParser",
"(",
")",
"subs",
"=",
"parser",
".",
"add_subparsers",
"(",
"dest",
"=",
"'cmd'",
")",
"setup_parser",
"=",
"subs",
".",
"add_parser",
"(",
"'setup'",
")",
"setup_parser",
".",
"add_argument",
"(",
"'-e'",
",",
"'--email'",
",",
"dest",
"=",
"'email'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'Email of the Google user.'",
",",
"type",
"=",
"str",
")",
"setup_parser",
".",
"add_argument",
"(",
"'-p'",
",",
"'--password'",
",",
"dest",
"=",
"'pwd'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'Password of the Google user.'",
",",
"type",
"=",
"str",
")",
"setup_parser",
"=",
"subs",
".",
"add_parser",
"(",
"'seed'",
")",
"setup_parser",
".",
"add_argument",
"(",
"'-d'",
",",
"'--driver'",
",",
"dest",
"=",
"'driver'",
",",
"required",
"=",
"True",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Location of the Chrome driver. This can be downloaded by visiting http://chromedriver.chromium.org/downloads'",
",",
")",
"setup_parser",
"=",
"subs",
".",
"add_parser",
"(",
"'list'",
")",
"setup_parser",
"=",
"subs",
".",
"add_parser",
"(",
"'create'",
")",
"setup_parser",
".",
"add_argument",
"(",
"'-t'",
",",
"'--term'",
",",
"dest",
"=",
"'term'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'Term to store.'",
",",
"type",
"=",
"str",
")",
"setup_parser",
".",
"add_argument",
"(",
"'--exact'",
",",
"dest",
"=",
"'exact'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Exact matches only for term.'",
")",
"setup_parser",
".",
"add_argument",
"(",
"'-d'",
",",
"'--delivery'",
",",
"dest",
"=",
"'delivery'",
",",
"required",
"=",
"True",
",",
"choices",
"=",
"[",
"'rss'",
",",
"'mail'",
"]",
",",
"help",
"=",
"'Delivery method of results.'",
")",
"setup_parser",
".",
"add_argument",
"(",
"'-f'",
",",
"'--frequency'",
",",
"dest",
"=",
"'frequency'",
",",
"default",
"=",
"\"realtime\"",
",",
"choices",
"=",
"[",
"'realtime'",
",",
"'daily'",
",",
"'weekly'",
"]",
",",
"help",
"=",
"'Frequency to send results. RSS only allows for realtime alerting.'",
")",
"setup_parser",
"=",
"subs",
".",
"add_parser",
"(",
"'delete'",
")",
"setup_parser",
".",
"add_argument",
"(",
"'--id'",
",",
"dest",
"=",
"'term_id'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'ID of the term to find for deletion.'",
",",
"type",
"=",
"str",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"args",
".",
"cmd",
"==",
"'setup'",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"CONFIG_PATH",
")",
":",
"os",
".",
"makedirs",
"(",
"CONFIG_PATH",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"CONFIG_FILE",
")",
":",
"json",
".",
"dump",
"(",
"CONFIG_DEFAULTS",
",",
"open",
"(",
"CONFIG_FILE",
",",
"'w'",
")",
",",
"indent",
"=",
"4",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
"config",
"=",
"CONFIG_DEFAULTS",
"config",
"[",
"'email'",
"]",
"=",
"args",
".",
"email",
"config",
"[",
"'password'",
"]",
"=",
"str",
"(",
"obfuscate",
"(",
"args",
".",
"pwd",
",",
"'store'",
")",
")",
"json",
".",
"dump",
"(",
"config",
",",
"open",
"(",
"CONFIG_FILE",
",",
"'w'",
")",
",",
"indent",
"=",
"4",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
"config",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"CONFIG_FILE",
")",
")",
"if",
"config",
".",
"get",
"(",
"'py2'",
",",
"PY2",
")",
"!=",
"PY2",
":",
"raise",
"Exception",
"(",
"\"Python versions have changed. Please run `setup` again to reconfigure the client.\"",
")",
"if",
"config",
"[",
"'password'",
"]",
"==",
"''",
":",
"raise",
"Exception",
"(",
"\"Run setup before any other actions!\"",
")",
"if",
"args",
".",
"cmd",
"==",
"'seed'",
":",
"config",
"[",
"'password'",
"]",
"=",
"obfuscate",
"(",
"str",
"(",
"config",
"[",
"'password'",
"]",
")",
",",
"'fetch'",
")",
"ga",
"=",
"GoogleAlerts",
"(",
"config",
"[",
"'email'",
"]",
",",
"config",
"[",
"'password'",
"]",
")",
"with",
"contextlib",
".",
"closing",
"(",
"webdriver",
".",
"Chrome",
"(",
"args",
".",
"driver",
")",
")",
"as",
"driver",
":",
"driver",
".",
"get",
"(",
"ga",
".",
"LOGIN_URL",
")",
"wait",
"=",
"ui",
".",
"WebDriverWait",
"(",
"driver",
",",
"10",
")",
"# timeout after 10 seconds",
"inputElement",
"=",
"driver",
".",
"find_element_by_name",
"(",
"'Email'",
")",
"inputElement",
".",
"send_keys",
"(",
"config",
"[",
"'email'",
"]",
")",
"inputElement",
".",
"submit",
"(",
")",
"time",
".",
"sleep",
"(",
"3",
")",
"inputElement",
"=",
"driver",
".",
"find_element_by_id",
"(",
"'Passwd'",
")",
"inputElement",
".",
"send_keys",
"(",
"config",
"[",
"'password'",
"]",
")",
"inputElement",
".",
"submit",
"(",
")",
"print",
"(",
"\"[!] Waiting 15 seconds for authentication to complete\"",
")",
"time",
".",
"sleep",
"(",
"15",
")",
"cookies",
"=",
"driver",
".",
"get_cookies",
"(",
")",
"collected",
"=",
"dict",
"(",
")",
"for",
"cookie",
"in",
"cookies",
":",
"collected",
"[",
"str",
"(",
"cookie",
"[",
"'name'",
"]",
")",
"]",
"=",
"str",
"(",
"cookie",
"[",
"'value'",
"]",
")",
"with",
"open",
"(",
"SESSION_FILE",
",",
"'wb'",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"collected",
",",
"f",
",",
"protocol",
"=",
"2",
")",
"print",
"(",
"\"Session has been seeded.\"",
")",
"if",
"args",
".",
"cmd",
"==",
"'list'",
":",
"config",
"[",
"'password'",
"]",
"=",
"obfuscate",
"(",
"str",
"(",
"config",
"[",
"'password'",
"]",
")",
",",
"'fetch'",
")",
"ga",
"=",
"GoogleAlerts",
"(",
"config",
"[",
"'email'",
"]",
",",
"config",
"[",
"'password'",
"]",
")",
"ga",
".",
"authenticate",
"(",
")",
"print",
"(",
"json",
".",
"dumps",
"(",
"ga",
".",
"list",
"(",
")",
",",
"indent",
"=",
"4",
")",
")",
"if",
"args",
".",
"cmd",
"==",
"'create'",
":",
"config",
"[",
"'password'",
"]",
"=",
"obfuscate",
"(",
"str",
"(",
"config",
"[",
"'password'",
"]",
")",
",",
"'fetch'",
")",
"ga",
"=",
"GoogleAlerts",
"(",
"config",
"[",
"'email'",
"]",
",",
"config",
"[",
"'password'",
"]",
")",
"ga",
".",
"authenticate",
"(",
")",
"alert_frequency",
"=",
"'as_it_happens'",
"if",
"args",
".",
"frequency",
"==",
"'realtime'",
":",
"alert_frequency",
"=",
"'as_it_happens'",
"elif",
"args",
".",
"frequency",
"==",
"'daily'",
":",
"alert_frequency",
"=",
"'at_most_once_a_day'",
"else",
":",
"alert_frequency",
"=",
"'at_most_once_a_week'",
"monitor",
"=",
"ga",
".",
"create",
"(",
"args",
".",
"term",
",",
"{",
"'delivery'",
":",
"args",
".",
"delivery",
".",
"upper",
"(",
")",
",",
"'alert_frequency'",
":",
"alert_frequency",
".",
"upper",
"(",
")",
",",
"'exact'",
":",
"args",
".",
"exact",
"}",
")",
"print",
"(",
"json",
".",
"dumps",
"(",
"monitor",
",",
"indent",
"=",
"4",
")",
")",
"if",
"args",
".",
"cmd",
"==",
"'delete'",
":",
"config",
"[",
"'password'",
"]",
"=",
"obfuscate",
"(",
"str",
"(",
"config",
"[",
"'password'",
"]",
")",
",",
"'fetch'",
")",
"ga",
"=",
"GoogleAlerts",
"(",
"config",
"[",
"'email'",
"]",
",",
"config",
"[",
"'password'",
"]",
")",
"ga",
".",
"authenticate",
"(",
")",
"result",
"=",
"ga",
".",
"delete",
"(",
"args",
".",
"term_id",
")",
"if",
"result",
":",
"print",
"(",
"\"%s was deleted\"",
"%",
"args",
".",
"term_id",
")"
] | Run the core. | [
"Run",
"the",
"core",
"."
] | train | https://github.com/9b/google-alerts/blob/69a502cbcccf1bcafe963ed40d286441b0117e04/google_alerts/cli/manage.py#L65-L166 |
r1chardj0n3s/pip-check-reqs | pip_check_reqs/common.py | search_packages_info | def search_packages_info(query):
"""
Gather details from installed distributions. Print distribution name,
version, location, and installed files. Installed files requires a
pip generated 'installed-files.txt' in the distributions '.egg-info'
directory.
"""
installed = {}
for p in pkg_resources.working_set:
installed[canonicalize_name(p.project_name)] = p
query_names = [canonicalize_name(name) for name in query]
for dist in [installed[pkg] for pkg in query_names if pkg in installed]:
package = {
'name': dist.project_name,
'version': dist.version,
'location': dist.location,
'requires': [dep.project_name for dep in dist.requires()],
}
file_list = None
if isinstance(dist, pkg_resources.DistInfoDistribution):
# RECORDs should be part of .dist-info metadatas
if dist.has_metadata('RECORD'):
lines = dist.get_metadata_lines('RECORD')
paths = [l.split(',')[0] for l in lines]
paths = [os.path.join(dist.location, p) for p in paths]
file_list = [os.path.relpath(p, dist.location) for p in paths]
else:
# Otherwise use pip's log for .egg-info's
if dist.has_metadata('installed-files.txt'):
paths = dist.get_metadata_lines('installed-files.txt')
paths = [os.path.join(dist.egg_info, p) for p in paths]
file_list = [os.path.relpath(p, dist.location) for p in paths]
if file_list:
package['files'] = sorted(file_list)
yield package | python | def search_packages_info(query):
"""
Gather details from installed distributions. Print distribution name,
version, location, and installed files. Installed files requires a
pip generated 'installed-files.txt' in the distributions '.egg-info'
directory.
"""
installed = {}
for p in pkg_resources.working_set:
installed[canonicalize_name(p.project_name)] = p
query_names = [canonicalize_name(name) for name in query]
for dist in [installed[pkg] for pkg in query_names if pkg in installed]:
package = {
'name': dist.project_name,
'version': dist.version,
'location': dist.location,
'requires': [dep.project_name for dep in dist.requires()],
}
file_list = None
if isinstance(dist, pkg_resources.DistInfoDistribution):
# RECORDs should be part of .dist-info metadatas
if dist.has_metadata('RECORD'):
lines = dist.get_metadata_lines('RECORD')
paths = [l.split(',')[0] for l in lines]
paths = [os.path.join(dist.location, p) for p in paths]
file_list = [os.path.relpath(p, dist.location) for p in paths]
else:
# Otherwise use pip's log for .egg-info's
if dist.has_metadata('installed-files.txt'):
paths = dist.get_metadata_lines('installed-files.txt')
paths = [os.path.join(dist.egg_info, p) for p in paths]
file_list = [os.path.relpath(p, dist.location) for p in paths]
if file_list:
package['files'] = sorted(file_list)
yield package | [
"def",
"search_packages_info",
"(",
"query",
")",
":",
"installed",
"=",
"{",
"}",
"for",
"p",
"in",
"pkg_resources",
".",
"working_set",
":",
"installed",
"[",
"canonicalize_name",
"(",
"p",
".",
"project_name",
")",
"]",
"=",
"p",
"query_names",
"=",
"[",
"canonicalize_name",
"(",
"name",
")",
"for",
"name",
"in",
"query",
"]",
"for",
"dist",
"in",
"[",
"installed",
"[",
"pkg",
"]",
"for",
"pkg",
"in",
"query_names",
"if",
"pkg",
"in",
"installed",
"]",
":",
"package",
"=",
"{",
"'name'",
":",
"dist",
".",
"project_name",
",",
"'version'",
":",
"dist",
".",
"version",
",",
"'location'",
":",
"dist",
".",
"location",
",",
"'requires'",
":",
"[",
"dep",
".",
"project_name",
"for",
"dep",
"in",
"dist",
".",
"requires",
"(",
")",
"]",
",",
"}",
"file_list",
"=",
"None",
"if",
"isinstance",
"(",
"dist",
",",
"pkg_resources",
".",
"DistInfoDistribution",
")",
":",
"# RECORDs should be part of .dist-info metadatas",
"if",
"dist",
".",
"has_metadata",
"(",
"'RECORD'",
")",
":",
"lines",
"=",
"dist",
".",
"get_metadata_lines",
"(",
"'RECORD'",
")",
"paths",
"=",
"[",
"l",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
"for",
"l",
"in",
"lines",
"]",
"paths",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"dist",
".",
"location",
",",
"p",
")",
"for",
"p",
"in",
"paths",
"]",
"file_list",
"=",
"[",
"os",
".",
"path",
".",
"relpath",
"(",
"p",
",",
"dist",
".",
"location",
")",
"for",
"p",
"in",
"paths",
"]",
"else",
":",
"# Otherwise use pip's log for .egg-info's",
"if",
"dist",
".",
"has_metadata",
"(",
"'installed-files.txt'",
")",
":",
"paths",
"=",
"dist",
".",
"get_metadata_lines",
"(",
"'installed-files.txt'",
")",
"paths",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"dist",
".",
"egg_info",
",",
"p",
")",
"for",
"p",
"in",
"paths",
"]",
"file_list",
"=",
"[",
"os",
".",
"path",
".",
"relpath",
"(",
"p",
",",
"dist",
".",
"location",
")",
"for",
"p",
"in",
"paths",
"]",
"if",
"file_list",
":",
"package",
"[",
"'files'",
"]",
"=",
"sorted",
"(",
"file_list",
")",
"yield",
"package"
] | Gather details from installed distributions. Print distribution name,
version, location, and installed files. Installed files requires a
pip generated 'installed-files.txt' in the distributions '.egg-info'
directory. | [
"Gather",
"details",
"from",
"installed",
"distributions",
".",
"Print",
"distribution",
"name",
"version",
"location",
"and",
"installed",
"files",
".",
"Installed",
"files",
"requires",
"a",
"pip",
"generated",
"installed",
"-",
"files",
".",
"txt",
"in",
"the",
"distributions",
".",
"egg",
"-",
"info",
"directory",
"."
] | train | https://github.com/r1chardj0n3s/pip-check-reqs/blob/2f9bfe821d122dca1877c51f288b8a8b31877f79/pip_check_reqs/common.py#L161-L198 |
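A quick usage sketch for search_packages_info(); it assumes pip itself is installed in the running environment and simply iterates the generator defined above.

    from pip_check_reqs.common import search_packages_info

    for package in search_packages_info(['pip']):
        print(package['name'], package['version'], len(package.get('files', [])))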
loftylabs/django-developer-panel | djdev_panel/middleware.py | DebugMiddleware.process_view | def process_view(self, request, view_func, view_args, view_kwargs):
"""
Collect data on Class-Based Views
"""
# Purge data in view method cache
# Python 3's keys() method returns an iterator, so force evaluation before iterating.
view_keys = list(VIEW_METHOD_DATA.keys())
for key in view_keys:
del VIEW_METHOD_DATA[key]
self.view_data = {}
try:
cbv = view_func.view_class
except AttributeError:
cbv = False
if cbv:
self.view_data['cbv'] = True
klass = view_func.view_class
self.view_data['bases'] = [base.__name__ for base in inspect.getmro(klass)]
# Inject with drugz
for member in inspect.getmembers(view_func.view_class):
# Check that we are interested in capturing data for this method
# and ensure that a decorated method is not decorated multiple times.
if member[0] in VIEW_METHOD_WHITEIST and member[0] not in PATCHED_METHODS[klass]:
decorate_method(klass, member[0])
PATCHED_METHODS[klass].append(member[0]) | python | def process_view(self, request, view_func, view_args, view_kwargs):
"""
Collect data on Class-Based Views
"""
# Purge data in view method cache
# Python 3's keys() method returns an iterator, so force evaluation before iterating.
view_keys = list(VIEW_METHOD_DATA.keys())
for key in view_keys:
del VIEW_METHOD_DATA[key]
self.view_data = {}
try:
cbv = view_func.view_class
except AttributeError:
cbv = False
if cbv:
self.view_data['cbv'] = True
klass = view_func.view_class
self.view_data['bases'] = [base.__name__ for base in inspect.getmro(klass)]
# Inject with drugz
for member in inspect.getmembers(view_func.view_class):
# Check that we are interested in capturing data for this method
# and ensure that a decorated method is not decorated multiple times.
if member[0] in VIEW_METHOD_WHITEIST and member[0] not in PATCHED_METHODS[klass]:
decorate_method(klass, member[0])
PATCHED_METHODS[klass].append(member[0]) | [
"def",
"process_view",
"(",
"self",
",",
"request",
",",
"view_func",
",",
"view_args",
",",
"view_kwargs",
")",
":",
"# Purge data in view method cache",
"# Python 3's keys() method returns an iterator, so force evaluation before iterating.",
"view_keys",
"=",
"list",
"(",
"VIEW_METHOD_DATA",
".",
"keys",
"(",
")",
")",
"for",
"key",
"in",
"view_keys",
":",
"del",
"VIEW_METHOD_DATA",
"[",
"key",
"]",
"self",
".",
"view_data",
"=",
"{",
"}",
"try",
":",
"cbv",
"=",
"view_func",
".",
"view_class",
"except",
"AttributeError",
":",
"cbv",
"=",
"False",
"if",
"cbv",
":",
"self",
".",
"view_data",
"[",
"'cbv'",
"]",
"=",
"True",
"klass",
"=",
"view_func",
".",
"view_class",
"self",
".",
"view_data",
"[",
"'bases'",
"]",
"=",
"[",
"base",
".",
"__name__",
"for",
"base",
"in",
"inspect",
".",
"getmro",
"(",
"klass",
")",
"]",
"# Inject with drugz",
"for",
"member",
"in",
"inspect",
".",
"getmembers",
"(",
"view_func",
".",
"view_class",
")",
":",
"# Check that we are interested in capturing data for this method",
"# and ensure that a decorated method is not decorated multiple times.",
"if",
"member",
"[",
"0",
"]",
"in",
"VIEW_METHOD_WHITEIST",
"and",
"member",
"[",
"0",
"]",
"not",
"in",
"PATCHED_METHODS",
"[",
"klass",
"]",
":",
"decorate_method",
"(",
"klass",
",",
"member",
"[",
"0",
"]",
")",
"PATCHED_METHODS",
"[",
"klass",
"]",
".",
"append",
"(",
"member",
"[",
"0",
"]",
")"
] | Collect data on Class-Based Views | [
"Collect",
"data",
"on",
"Class",
"-",
"Based",
"Views"
] | train | https://github.com/loftylabs/django-developer-panel/blob/52fd2666a158b197fdd643d3c2c4d9bbc5ba230a/djdev_panel/middleware.py#L141-L171 |
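A settings sketch for wiring in the middleware; the dotted path is inferred from the djdev_panel/middleware.py location and the DebugMiddleware class name, so treat it as an assumption and prefer the project's own documentation if it differs.

    # settings.py
    DEBUG = True  # the process_response record below only injects the panel when DEBUG is on
    MIDDLEWARE = [
        'django.middleware.common.CommonMiddleware',
        'djdev_panel.middleware.DebugMiddleware',  # inferred dotted path
    ]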
loftylabs/django-developer-panel | djdev_panel/middleware.py | DebugMiddleware.process_response | def process_response(self, request, response):
"""Let's handle old-style response processing here, as usual."""
# For debug only.
if not settings.DEBUG:
return response
# Check for responses where the data can't be inserted.
content_encoding = response.get('Content-Encoding', '')
content_type = response.get('Content-Type', '').split(';')[0]
if any((getattr(response, 'streaming', False),
'gzip' in content_encoding,
content_type not in _HTML_TYPES)):
return response
content = force_text(response.content, encoding=settings.DEFAULT_CHARSET)
pattern = re.escape('</body>')
bits = re.split(pattern, content, flags=re.IGNORECASE)
if len(bits) > 1:
bits[-2] += debug_payload(request, response, self.view_data)
response.content = "</body>".join(bits)
if response.get('Content-Length', None):
response['Content-Length'] = len(response.content)
return response | python | def process_response(self, request, response):
"""Let's handle old-style response processing here, as usual."""
# For debug only.
if not settings.DEBUG:
return response
# Check for responses where the data can't be inserted.
content_encoding = response.get('Content-Encoding', '')
content_type = response.get('Content-Type', '').split(';')[0]
if any((getattr(response, 'streaming', False),
'gzip' in content_encoding,
content_type not in _HTML_TYPES)):
return response
content = force_text(response.content, encoding=settings.DEFAULT_CHARSET)
pattern = re.escape('</body>')
bits = re.split(pattern, content, flags=re.IGNORECASE)
if len(bits) > 1:
bits[-2] += debug_payload(request, response, self.view_data)
response.content = "</body>".join(bits)
if response.get('Content-Length', None):
response['Content-Length'] = len(response.content)
return response | [
"def",
"process_response",
"(",
"self",
",",
"request",
",",
"response",
")",
":",
"# For debug only.",
"if",
"not",
"settings",
".",
"DEBUG",
":",
"return",
"response",
"# Check for responses where the data can't be inserted.",
"content_encoding",
"=",
"response",
".",
"get",
"(",
"'Content-Encoding'",
",",
"''",
")",
"content_type",
"=",
"response",
".",
"get",
"(",
"'Content-Type'",
",",
"''",
")",
".",
"split",
"(",
"';'",
")",
"[",
"0",
"]",
"if",
"any",
"(",
"(",
"getattr",
"(",
"response",
",",
"'streaming'",
",",
"False",
")",
",",
"'gzip'",
"in",
"content_encoding",
",",
"content_type",
"not",
"in",
"_HTML_TYPES",
")",
")",
":",
"return",
"response",
"content",
"=",
"force_text",
"(",
"response",
".",
"content",
",",
"encoding",
"=",
"settings",
".",
"DEFAULT_CHARSET",
")",
"pattern",
"=",
"re",
".",
"escape",
"(",
"'</body>'",
")",
"bits",
"=",
"re",
".",
"split",
"(",
"pattern",
",",
"content",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
")",
"if",
"len",
"(",
"bits",
")",
">",
"1",
":",
"bits",
"[",
"-",
"2",
"]",
"+=",
"debug_payload",
"(",
"request",
",",
"response",
",",
"self",
".",
"view_data",
")",
"response",
".",
"content",
"=",
"\"</body>\"",
".",
"join",
"(",
"bits",
")",
"if",
"response",
".",
"get",
"(",
"'Content-Length'",
",",
"None",
")",
":",
"response",
"[",
"'Content-Length'",
"]",
"=",
"len",
"(",
"response",
".",
"content",
")",
"return",
"response"
] | Let's handle old-style response processing here, as usual. | [
"Let",
"s",
"handle",
"old",
"-",
"style",
"response",
"processing",
"here",
"as",
"usual",
"."
] | train | https://github.com/loftylabs/django-developer-panel/blob/52fd2666a158b197fdd643d3c2c4d9bbc5ba230a/djdev_panel/middleware.py#L191-L217 |
codeinthehole/django-cacheback | cacheback/utils.py | get_job_class | def get_job_class(klass_str):
"""
Return the job class
"""
mod_name, klass_name = klass_str.rsplit('.', 1)
try:
mod = importlib.import_module(mod_name)
except ImportError as e:
logger.error("Error importing job module %s: '%s'", mod_name, e)
return
try:
klass = getattr(mod, klass_name)
except AttributeError:
logger.error("Module '%s' does not define a '%s' class", mod_name, klass_name)
return
return klass | python | def get_job_class(klass_str):
"""
Return the job class
"""
mod_name, klass_name = klass_str.rsplit('.', 1)
try:
mod = importlib.import_module(mod_name)
except ImportError as e:
logger.error("Error importing job module %s: '%s'", mod_name, e)
return
try:
klass = getattr(mod, klass_name)
except AttributeError:
logger.error("Module '%s' does not define a '%s' class", mod_name, klass_name)
return
return klass | [
"def",
"get_job_class",
"(",
"klass_str",
")",
":",
"mod_name",
",",
"klass_name",
"=",
"klass_str",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"try",
":",
"mod",
"=",
"importlib",
".",
"import_module",
"(",
"mod_name",
")",
"except",
"ImportError",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"\"Error importing job module %s: '%s'\"",
",",
"mod_name",
",",
"e",
")",
"return",
"try",
":",
"klass",
"=",
"getattr",
"(",
"mod",
",",
"klass_name",
")",
"except",
"AttributeError",
":",
"logger",
".",
"error",
"(",
"\"Module '%s' does not define a '%s' class\"",
",",
"mod_name",
",",
"klass_name",
")",
"return",
"return",
"klass"
] | Return the job class | [
"Return",
"the",
"job",
"class"
] | train | https://github.com/codeinthehole/django-cacheback/blob/0c79a524a28ca2fada98ed58c26c544f07a58e14/cacheback/utils.py#L32-L47 |
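An example call; 'myproject.jobs.ExampleJob' is a hypothetical dotted path, and the function returns None (after logging an error) when the module or attribute cannot be imported.

    from cacheback.utils import get_job_class

    job_class = get_job_class('myproject.jobs.ExampleJob')  # hypothetical path
    if job_class is not None:
        job = job_class()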
codeinthehole/django-cacheback | cacheback/base.py | Job.get | def get(self, *raw_args, **raw_kwargs):
"""
Return the data for this function (using the cache if possible).
This method is not intended to be overridden
"""
# We pass args and kwargs through a filter to allow them to be
# converted into values that can be pickled.
args = self.prepare_args(*raw_args)
kwargs = self.prepare_kwargs(**raw_kwargs)
# Build the cache key and attempt to fetch the cached item
key = self.key(*args, **kwargs)
item = self.cache.get(key)
call = Call(args=raw_args, kwargs=raw_kwargs)
if item is None:
# Cache MISS - we can either:
# a) fetch the data immediately, blocking execution until
# the fetch has finished, or
# b) trigger an async refresh and return an empty result
if self.should_missing_item_be_fetched_synchronously(*args, **kwargs):
logger.debug(("Job %s with key '%s' - cache MISS - running "
"synchronous refresh"),
self.class_path, key)
result = self.refresh(*args, **kwargs)
return self.process_result(
result, call=call, cache_status=self.MISS, sync_fetch=True)
else:
logger.debug(("Job %s with key '%s' - cache MISS - triggering "
"async refresh and returning empty result"),
self.class_path, key)
# To avoid cache hammering (ie lots of identical tasks
# to refresh the same cache item), we reset the cache with an
# empty result which will be returned until the cache is
# refreshed.
result = self.empty()
self.store(key, self.timeout(*args, **kwargs), result)
self.async_refresh(*args, **kwargs)
return self.process_result(
result, call=call, cache_status=self.MISS,
sync_fetch=False)
expiry, data = item
delta = time.time() - expiry
if delta > 0:
# Cache HIT but STALE expiry - we can either:
# a) fetch the data immediately, blocking execution until
# the fetch has finished, or
# b) trigger a refresh but allow the stale result to be
# returned this time. This is normally acceptable.
if self.should_stale_item_be_fetched_synchronously(
delta, *args, **kwargs):
logger.debug(
("Job %s with key '%s' - STALE cache hit - running "
"synchronous refresh"),
self.class_path, key)
result = self.refresh(*args, **kwargs)
return self.process_result(
result, call=call, cache_status=self.STALE,
sync_fetch=True)
else:
logger.debug(
("Job %s with key '%s' - STALE cache hit - triggering "
"async refresh and returning stale result"),
self.class_path, key)
# We replace the item in the cache with a 'timeout' expiry - this
# prevents cache hammering but guards against a 'limbo' situation
# where the refresh task fails for some reason.
timeout = self.timeout(*args, **kwargs)
self.store(key, timeout, data)
self.async_refresh(*args, **kwargs)
return self.process_result(
data, call=call, cache_status=self.STALE, sync_fetch=False)
else:
logger.debug("Job %s with key '%s' - cache HIT", self.class_path, key)
return self.process_result(data, call=call, cache_status=self.HIT) | python | def get(self, *raw_args, **raw_kwargs):
"""
Return the data for this function (using the cache if possible).
This method is not intended to be overridden
"""
# We pass args and kwargs through a filter to allow them to be
# converted into values that can be pickled.
args = self.prepare_args(*raw_args)
kwargs = self.prepare_kwargs(**raw_kwargs)
# Build the cache key and attempt to fetch the cached item
key = self.key(*args, **kwargs)
item = self.cache.get(key)
call = Call(args=raw_args, kwargs=raw_kwargs)
if item is None:
# Cache MISS - we can either:
# a) fetch the data immediately, blocking execution until
# the fetch has finished, or
# b) trigger an async refresh and return an empty result
if self.should_missing_item_be_fetched_synchronously(*args, **kwargs):
logger.debug(("Job %s with key '%s' - cache MISS - running "
"synchronous refresh"),
self.class_path, key)
result = self.refresh(*args, **kwargs)
return self.process_result(
result, call=call, cache_status=self.MISS, sync_fetch=True)
else:
logger.debug(("Job %s with key '%s' - cache MISS - triggering "
"async refresh and returning empty result"),
self.class_path, key)
# To avoid cache hammering (ie lots of identical tasks
# to refresh the same cache item), we reset the cache with an
# empty result which will be returned until the cache is
# refreshed.
result = self.empty()
self.store(key, self.timeout(*args, **kwargs), result)
self.async_refresh(*args, **kwargs)
return self.process_result(
result, call=call, cache_status=self.MISS,
sync_fetch=False)
expiry, data = item
delta = time.time() - expiry
if delta > 0:
# Cache HIT but STALE expiry - we can either:
# a) fetch the data immediately, blocking execution until
# the fetch has finished, or
# b) trigger a refresh but allow the stale result to be
# returned this time. This is normally acceptable.
if self.should_stale_item_be_fetched_synchronously(
delta, *args, **kwargs):
logger.debug(
("Job %s with key '%s' - STALE cache hit - running "
"synchronous refresh"),
self.class_path, key)
result = self.refresh(*args, **kwargs)
return self.process_result(
result, call=call, cache_status=self.STALE,
sync_fetch=True)
else:
logger.debug(
("Job %s with key '%s' - STALE cache hit - triggering "
"async refresh and returning stale result"),
self.class_path, key)
# We replace the item in the cache with a 'timeout' expiry - this
# prevents cache hammering but guards against a 'limbo' situation
# where the refresh task fails for some reason.
timeout = self.timeout(*args, **kwargs)
self.store(key, timeout, data)
self.async_refresh(*args, **kwargs)
return self.process_result(
data, call=call, cache_status=self.STALE, sync_fetch=False)
else:
logger.debug("Job %s with key '%s' - cache HIT", self.class_path, key)
return self.process_result(data, call=call, cache_status=self.HIT) | [
"def",
"get",
"(",
"self",
",",
"*",
"raw_args",
",",
"*",
"*",
"raw_kwargs",
")",
":",
"# We pass args and kwargs through a filter to allow them to be",
"# converted into values that can be pickled.",
"args",
"=",
"self",
".",
"prepare_args",
"(",
"*",
"raw_args",
")",
"kwargs",
"=",
"self",
".",
"prepare_kwargs",
"(",
"*",
"*",
"raw_kwargs",
")",
"# Build the cache key and attempt to fetch the cached item",
"key",
"=",
"self",
".",
"key",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"item",
"=",
"self",
".",
"cache",
".",
"get",
"(",
"key",
")",
"call",
"=",
"Call",
"(",
"args",
"=",
"raw_args",
",",
"kwargs",
"=",
"raw_kwargs",
")",
"if",
"item",
"is",
"None",
":",
"# Cache MISS - we can either:",
"# a) fetch the data immediately, blocking execution until",
"# the fetch has finished, or",
"# b) trigger an async refresh and return an empty result",
"if",
"self",
".",
"should_missing_item_be_fetched_synchronously",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"logger",
".",
"debug",
"(",
"(",
"\"Job %s with key '%s' - cache MISS - running \"",
"\"synchronous refresh\"",
")",
",",
"self",
".",
"class_path",
",",
"key",
")",
"result",
"=",
"self",
".",
"refresh",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"process_result",
"(",
"result",
",",
"call",
"=",
"call",
",",
"cache_status",
"=",
"self",
".",
"MISS",
",",
"sync_fetch",
"=",
"True",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"(",
"\"Job %s with key '%s' - cache MISS - triggering \"",
"\"async refresh and returning empty result\"",
")",
",",
"self",
".",
"class_path",
",",
"key",
")",
"# To avoid cache hammering (ie lots of identical tasks",
"# to refresh the same cache item), we reset the cache with an",
"# empty result which will be returned until the cache is",
"# refreshed.",
"result",
"=",
"self",
".",
"empty",
"(",
")",
"self",
".",
"store",
"(",
"key",
",",
"self",
".",
"timeout",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"result",
")",
"self",
".",
"async_refresh",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"process_result",
"(",
"result",
",",
"call",
"=",
"call",
",",
"cache_status",
"=",
"self",
".",
"MISS",
",",
"sync_fetch",
"=",
"False",
")",
"expiry",
",",
"data",
"=",
"item",
"delta",
"=",
"time",
".",
"time",
"(",
")",
"-",
"expiry",
"if",
"delta",
">",
"0",
":",
"# Cache HIT but STALE expiry - we can either:",
"# a) fetch the data immediately, blocking execution until",
"# the fetch has finished, or",
"# b) trigger a refresh but allow the stale result to be",
"# returned this time. This is normally acceptable.",
"if",
"self",
".",
"should_stale_item_be_fetched_synchronously",
"(",
"delta",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"logger",
".",
"debug",
"(",
"(",
"\"Job %s with key '%s' - STALE cache hit - running \"",
"\"synchronous refresh\"",
")",
",",
"self",
".",
"class_path",
",",
"key",
")",
"result",
"=",
"self",
".",
"refresh",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"process_result",
"(",
"result",
",",
"call",
"=",
"call",
",",
"cache_status",
"=",
"self",
".",
"STALE",
",",
"sync_fetch",
"=",
"True",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"(",
"\"Job %s with key '%s' - STALE cache hit - triggering \"",
"\"async refresh and returning stale result\"",
")",
",",
"self",
".",
"class_path",
",",
"key",
")",
"# We replace the item in the cache with a 'timeout' expiry - this",
"# prevents cache hammering but guards against a 'limbo' situation",
"# where the refresh task fails for some reason.",
"timeout",
"=",
"self",
".",
"timeout",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"store",
"(",
"key",
",",
"timeout",
",",
"data",
")",
"self",
".",
"async_refresh",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"process_result",
"(",
"data",
",",
"call",
"=",
"call",
",",
"cache_status",
"=",
"self",
".",
"STALE",
",",
"sync_fetch",
"=",
"False",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Job %s with key '%s' - cache HIT\"",
",",
"self",
".",
"class_path",
",",
"key",
")",
"return",
"self",
".",
"process_result",
"(",
"data",
",",
"call",
"=",
"call",
",",
"cache_status",
"=",
"self",
".",
"HIT",
")"
] | Return the data for this function (using the cache if possible).
This method is not intended to be overridden | [
"Return",
"the",
"data",
"for",
"this",
"function",
"(",
"using",
"the",
"cache",
"if",
"possible",
")",
"."
] | train | https://github.com/codeinthehole/django-cacheback/blob/0c79a524a28ca2fada98ed58c26c544f07a58e14/cacheback/base.py#L133-L212 |
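A hedged sketch of how get() is typically driven inside a configured Django project: subclass Job and define a fetch() hook for refresh() to call. fetch() is not part of this excerpt, so the hook name reflects the conventional pattern rather than anything verified here.

    from cacheback.base import Job

    class ExampleJob(Job):
        def fetch(self, name):
            # Assumed hook: refresh() is expected to call fetch() on the subclass, but
            # fetch() itself is not shown in the excerpt above.
            return {'greeting': 'hello %s' % name}

    result = ExampleJob().get('world')  # follows the MISS/STALE/HIT branches shown above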
codeinthehole/django-cacheback | cacheback/base.py | Job.invalidate | def invalidate(self, *raw_args, **raw_kwargs):
"""
Mark a cached item invalid and trigger an asynchronous
job to refresh the cache
"""
args = self.prepare_args(*raw_args)
kwargs = self.prepare_kwargs(**raw_kwargs)
key = self.key(*args, **kwargs)
item = self.cache.get(key)
if item is not None:
expiry, data = item
self.store(key, self.timeout(*args, **kwargs), data)
self.async_refresh(*args, **kwargs) | python | def invalidate(self, *raw_args, **raw_kwargs):
"""
Mark a cached item invalid and trigger an asynchronous
job to refresh the cache
"""
args = self.prepare_args(*raw_args)
kwargs = self.prepare_kwargs(**raw_kwargs)
key = self.key(*args, **kwargs)
item = self.cache.get(key)
if item is not None:
expiry, data = item
self.store(key, self.timeout(*args, **kwargs), data)
self.async_refresh(*args, **kwargs) | [
"def",
"invalidate",
"(",
"self",
",",
"*",
"raw_args",
",",
"*",
"*",
"raw_kwargs",
")",
":",
"args",
"=",
"self",
".",
"prepare_args",
"(",
"*",
"raw_args",
")",
"kwargs",
"=",
"self",
".",
"prepare_kwargs",
"(",
"*",
"*",
"raw_kwargs",
")",
"key",
"=",
"self",
".",
"key",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"item",
"=",
"self",
".",
"cache",
".",
"get",
"(",
"key",
")",
"if",
"item",
"is",
"not",
"None",
":",
"expiry",
",",
"data",
"=",
"item",
"self",
".",
"store",
"(",
"key",
",",
"self",
".",
"timeout",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"data",
")",
"self",
".",
"async_refresh",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Mark a cached item invalid and trigger an asynchronous
job to refresh the cache | [
"Mark",
"a",
"cached",
"item",
"invalid",
"and",
"trigger",
"an",
"asynchronous",
"job",
"to",
"refresh",
"the",
"cache"
] | train | https://github.com/codeinthehole/django-cacheback/blob/0c79a524a28ca2fada98ed58c26c544f07a58e14/cacheback/base.py#L214-L226 |
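A short usage sketch of invalidate(), continuing the hypothetical UserTweets job from the sketch after the get() row; note that it only acts when the key is already cached:

job = UserTweets()          # hypothetical Job subclass, as sketched above
job.get('barry')            # populate the cache
job.invalidate('barry')     # re-store the old data with the short 'timeout' expiry
                            # and queue an async refresh; until that task runs,
                            # get('barry') keeps returning the previous result
                            # instead of blocking the caller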
codeinthehole/django-cacheback | cacheback/base.py | Job.delete | def delete(self, *raw_args, **raw_kwargs):
"""
Remove an item from the cache
"""
args = self.prepare_args(*raw_args)
kwargs = self.prepare_kwargs(**raw_kwargs)
key = self.key(*args, **kwargs)
item = self.cache.get(key)
if item is not None:
self.cache.delete(key) | python | def delete(self, *raw_args, **raw_kwargs):
"""
Remove an item from the cache
"""
args = self.prepare_args(*raw_args)
kwargs = self.prepare_kwargs(**raw_kwargs)
key = self.key(*args, **kwargs)
item = self.cache.get(key)
if item is not None:
self.cache.delete(key) | [
"def",
"delete",
"(",
"self",
",",
"*",
"raw_args",
",",
"*",
"*",
"raw_kwargs",
")",
":",
"args",
"=",
"self",
".",
"prepare_args",
"(",
"*",
"raw_args",
")",
"kwargs",
"=",
"self",
".",
"prepare_kwargs",
"(",
"*",
"*",
"raw_kwargs",
")",
"key",
"=",
"self",
".",
"key",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"item",
"=",
"self",
".",
"cache",
".",
"get",
"(",
"key",
")",
"if",
"item",
"is",
"not",
"None",
":",
"self",
".",
"cache",
".",
"delete",
"(",
"key",
")"
] | Remove an item from the cache | [
"Remove",
"an",
"item",
"from",
"the",
"cache"
] | train | https://github.com/codeinthehole/django-cacheback/blob/0c79a524a28ca2fada98ed58c26c544f07a58e14/cacheback/base.py#L228-L237 |
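By contrast with invalidate(), delete() drops the entry outright, so the next get() is a genuine cache miss. Continuing the same hypothetical job (the miss behaviour noted below assumes the default fetch-on-miss configuration, which is not shown in these rows):

job = UserTweets()      # hypothetical Job subclass, as sketched above
job.delete('barry')     # remove the cached item entirely; no refresh is queued
job.get('barry')        # cache MISS again: with the assumed default, fetch() runs
                        # synchronously before anything is returned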
codeinthehole/django-cacheback | cacheback/base.py | Job.raw_get | def raw_get(self, *raw_args, **raw_kwargs):
"""
Retrieve the item (tuple of value and expiry) that is actually in the cache,
without causing a refresh.
"""
args = self.prepare_args(*raw_args)
kwargs = self.prepare_kwargs(**raw_kwargs)
key = self.key(*args, **kwargs)
return self.cache.get(key) | python | def raw_get(self, *raw_args, **raw_kwargs):
"""
Retrieve the item (tuple of value and expiry) that is actually in the cache,
without causing a refresh.
"""
args = self.prepare_args(*raw_args)
kwargs = self.prepare_kwargs(**raw_kwargs)
key = self.key(*args, **kwargs)
return self.cache.get(key) | [
"def",
"raw_get",
"(",
"self",
",",
"*",
"raw_args",
",",
"*",
"*",
"raw_kwargs",
")",
":",
"args",
"=",
"self",
".",
"prepare_args",
"(",
"*",
"raw_args",
")",
"kwargs",
"=",
"self",
".",
"prepare_kwargs",
"(",
"*",
"*",
"raw_kwargs",
")",
"key",
"=",
"self",
".",
"key",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"cache",
".",
"get",
"(",
"key",
")"
] | Retrieve the item (tuple of value and expiry) that is actually in the cache,
without causing a refresh. | [
"Retrieve",
"the",
"item",
"(",
"tuple",
"of",
"value",
"and",
"expiry",
")",
"that",
"is",
"actually",
"in",
"the",
"cache",
"without",
"causing",
"a",
"refresh",
"."
] | train | https://github.com/codeinthehole/django-cacheback/blob/0c79a524a28ca2fada98ed58c26c544f07a58e14/cacheback/base.py#L239-L250 |
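raw_get() is useful for debugging or metrics because it exposes the stored (expiry, data) tuple without ever triggering a fetch or refresh. A small sketch, continuing the hypothetical job:

item = UserTweets().raw_get('barry')    # hypothetical job, as sketched above
if item is None:
    print("nothing cached for this key")
else:
    expiry, data = item                 # same (expiry, data) shape that store() writes
    print("expires at %s, cached value: %r" % (expiry, data))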
codeinthehole/django-cacheback | cacheback/base.py | Job.set | def set(self, *raw_args, **raw_kwargs):
"""
Manually set the cache value with its appropriate expiry.
"""
if self.set_data_kwarg in raw_kwargs:
data = raw_kwargs.pop(self.set_data_kwarg)
else:
raw_args = list(raw_args)
data = raw_args.pop()
args = self.prepare_args(*raw_args)
kwargs = self.prepare_kwargs(**raw_kwargs)
key = self.key(*args, **kwargs)
expiry = self.expiry(*args, **kwargs)
logger.debug("Setting %s cache with key '%s', args '%r', kwargs '%r', expiry '%r'",
self.class_path, key, args, kwargs, expiry)
self.store(key, expiry, data) | python | def set(self, *raw_args, **raw_kwargs):
"""
Manually set the cache value with its appropriate expiry.
"""
if self.set_data_kwarg in raw_kwargs:
data = raw_kwargs.pop(self.set_data_kwarg)
else:
raw_args = list(raw_args)
data = raw_args.pop()
args = self.prepare_args(*raw_args)
kwargs = self.prepare_kwargs(**raw_kwargs)
key = self.key(*args, **kwargs)
expiry = self.expiry(*args, **kwargs)
logger.debug("Setting %s cache with key '%s', args '%r', kwargs '%r', expiry '%r'",
self.class_path, key, args, kwargs, expiry)
self.store(key, expiry, data) | [
"def",
"set",
"(",
"self",
",",
"*",
"raw_args",
",",
"*",
"*",
"raw_kwargs",
")",
":",
"if",
"self",
".",
"set_data_kwarg",
"in",
"raw_kwargs",
":",
"data",
"=",
"raw_kwargs",
".",
"pop",
"(",
"self",
".",
"set_data_kwarg",
")",
"else",
":",
"raw_args",
"=",
"list",
"(",
"raw_args",
")",
"data",
"=",
"raw_args",
".",
"pop",
"(",
")",
"args",
"=",
"self",
".",
"prepare_args",
"(",
"*",
"raw_args",
")",
"kwargs",
"=",
"self",
".",
"prepare_kwargs",
"(",
"*",
"*",
"raw_kwargs",
")",
"key",
"=",
"self",
".",
"key",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"expiry",
"=",
"self",
".",
"expiry",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"logger",
".",
"debug",
"(",
"\"Setting %s cache with key '%s', args '%r', kwargs '%r', expiry '%r'\"",
",",
"self",
".",
"class_path",
",",
"key",
",",
"args",
",",
"kwargs",
",",
"expiry",
")",
"self",
".",
"store",
"(",
"key",
",",
"expiry",
",",
"data",
")"
] | Manually set the cache value with its appropriate expiry. | [
"Manually",
"set",
"the",
"cache",
"value",
"with",
"its",
"appropriate",
"expiry",
"."
] | train | https://github.com/codeinthehole/django-cacheback/blob/0c79a524a28ca2fada98ed58c26c544f07a58e14/cacheback/base.py#L252-L272 |
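set() lets callers seed or overwrite the cached value directly, computing the expiry exactly as a normal refresh would. The data is taken either from the last positional argument or from the keyword named by set_data_kwarg; that attribute is assumed below to be 'data', which is not shown in these rows, so check it on your version:

job = UserTweets()                        # hypothetical job, as sketched above
job.set('barry', ['seeded tweet'])        # data as the final positional argument
job.set('barry', data=['seeded tweet'])   # or via the set_data_kwarg keyword (assumed 'data')
job.get('barry')                          # now a cache HIT without fetch() ever running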
codeinthehole/django-cacheback | cacheback/base.py | Job.store | def store(self, key, expiry, data):
"""
Add a result to the cache
:key: Cache key to use
:expiry: The expiry timestamp after which the result is stale
:data: The data to cache
"""
self.cache.set(key, (expiry, data), self.cache_ttl)
if getattr(settings, 'CACHEBACK_VERIFY_CACHE_WRITE', True):
# We verify that the item was cached correctly. This is to avoid a
# Memcache problem where some values aren't cached correctly
# without warning.
__, cached_data = self.cache.get(key, (None, None))
if data is not None and cached_data is None:
raise RuntimeError(
"Unable to save data of type %s to cache" % (
type(data))) | python | def store(self, key, expiry, data):
"""
Add a result to the cache
:key: Cache key to use
:expiry: The expiry timestamp after which the result is stale
:data: The data to cache
"""
self.cache.set(key, (expiry, data), self.cache_ttl)
if getattr(settings, 'CACHEBACK_VERIFY_CACHE_WRITE', True):
# We verify that the item was cached correctly. This is to avoid a
# Memcache problem where some values aren't cached correctly
# without warning.
__, cached_data = self.cache.get(key, (None, None))
if data is not None and cached_data is None:
raise RuntimeError(
"Unable to save data of type %s to cache" % (
type(data))) | [
"def",
"store",
"(",
"self",
",",
"key",
",",
"expiry",
",",
"data",
")",
":",
"self",
".",
"cache",
".",
"set",
"(",
"key",
",",
"(",
"expiry",
",",
"data",
")",
",",
"self",
".",
"cache_ttl",
")",
"if",
"getattr",
"(",
"settings",
",",
"'CACHEBACK_VERIFY_CACHE_WRITE'",
",",
"True",
")",
":",
"# We verify that the item was cached correctly. This is to avoid a",
"# Memcache problem where some values aren't cached correctly",
"# without warning.",
"__",
",",
"cached_data",
"=",
"self",
".",
"cache",
".",
"get",
"(",
"key",
",",
"(",
"None",
",",
"None",
")",
")",
"if",
"data",
"is",
"not",
"None",
"and",
"cached_data",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"\"Unable to save data of type %s to cache\"",
"%",
"(",
"type",
"(",
"data",
")",
")",
")"
] | Add a result to the cache
:key: Cache key to use
:expiry: The expiry timestamp after which the result is stale
:data: The data to cache | [
"Add",
"a",
"result",
"to",
"the",
"cache"
] | train | https://github.com/codeinthehole/django-cacheback/blob/0c79a524a28ca2fada98ed58c26c544f07a58e14/cacheback/base.py#L284-L302 |
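The verification step in store() guards against memcached-style backends silently refusing a write (for example, a value over the item size limit), at the cost of one extra read per store. Since the code reads the CACHEBACK_VERIFY_CACHE_WRITE setting with a default of True, it can be switched off in Django settings; a sketch:

# settings.py
# Skip the read-back check if your cache backend legitimately drops some
# values and you prefer a silent miss over a RuntimeError on store().
CACHEBACK_VERIFY_CACHE_WRITE = False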
codeinthehole/django-cacheback | cacheback/base.py | Job.refresh | def refresh(self, *args, **kwargs):
"""
Fetch the result SYNCHRONOUSLY and populate the cache
"""
result = self.fetch(*args, **kwargs)
self.store(self.key(*args, **kwargs), self.expiry(*args, **kwargs), result)
return result | python | def refresh(self, *args, **kwargs):
"""
Fetch the result SYNCHRONOUSLY and populate the cache
"""
result = self.fetch(*args, **kwargs)
self.store(self.key(*args, **kwargs), self.expiry(*args, **kwargs), result)
return result | [
"def",
"refresh",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"self",
".",
"fetch",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"store",
"(",
"self",
".",
"key",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"self",
".",
"expiry",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"result",
")",
"return",
"result"
] | Fetch the result SYNCHRONOUSLY and populate the cache | [
"Fetch",
"the",
"result",
"SYNCHRONOUSLY",
"and",
"populate",
"the",
"cache"
] | train | https://github.com/codeinthehole/django-cacheback/blob/0c79a524a28ca2fada98ed58c26c544f07a58e14/cacheback/base.py#L304-L310 |
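refresh() is the synchronous counterpart of the async path: it calls fetch(), stores the result under the normal expiry, and returns it, which suits management commands or signal handlers that must have fresh data immediately. Continuing the hypothetical job:

job = UserTweets()              # hypothetical Job subclass, as sketched above
fresh = job.refresh('barry')    # blocks while fetch() runs and repopulates the cache
job.get('barry')                # immediately afterwards this is a plain cache HIT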