body (string, lengths 26 to 98.2k) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (string, lengths 1 to 16.8k) | path (string, lengths 5 to 230) | name (string, lengths 1 to 96) | repository_name (string, lengths 7 to 89) | lang (string, 1 class: python) | body_without_docstring (string, lengths 20 to 98.2k) |
---|---|---|---|---|---|---|---|
def NormalizeFieldTypeName(field_fqn):
'Normalize a fully qualified field type name, e.g.\n\n .envoy.foo.bar.\n\n Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.\n\n Args:\n field_fqn: a fully qualified type name from FieldDescriptorProto.type_name.\n Return: Normalized type name.\n '
if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX):
return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):]
if field_fqn.startswith(ENVOY_PREFIX):
return field_fqn[len(ENVOY_PREFIX):]
return field_fqn | -589,018,630,924,515,800 | Normalize a fully qualified field type name, e.g.
.envoy.foo.bar.
Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.
Args:
field_fqn: a fully qualified type name from FieldDescriptorProto.type_name.
Return: Normalized type name. | tools/protodoc/protodoc.py | NormalizeFieldTypeName | Gsantomaggio/envoy | python | def NormalizeFieldTypeName(field_fqn):
'Normalize a fully qualified field type name, e.g.\n\n .envoy.foo.bar.\n\n Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.\n\n Args:\n field_fqn: a fully qualified type name from FieldDescriptorProto.type_name.\n Return: Normalized type name.\n '
if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX):
return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):]
if field_fqn.startswith(ENVOY_PREFIX):
return field_fqn[len(ENVOY_PREFIX):]
return field_fqn |
def NormalizeTypeContextName(type_name):
'Normalize a type name, e.g.\n\n envoy.foo.bar.\n\n Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.\n\n Args:\n type_name: a name from a TypeContext.\n Return: Normalized type name.\n '
return NormalizeFieldTypeName(QualifyTypeName(type_name)) | 6,082,845,560,899,143,000 | Normalize a type name, e.g.
envoy.foo.bar.
Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.
Args:
type_name: a name from a TypeContext.
Return: Normalized type name. | tools/protodoc/protodoc.py | NormalizeTypeContextName | Gsantomaggio/envoy | python | def NormalizeTypeContextName(type_name):
'Normalize a type name, e.g.\n\n envoy.foo.bar.\n\n Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.\n\n Args:\n type_name: a name from a TypeContext.\n Return: Normalized type name.\n '
return NormalizeFieldTypeName(QualifyTypeName(type_name)) |
def FormatEmph(s):
'RST format a string for emphasis.'
return ('*%s*' % s) | 5,429,408,957,495,039,000 | RST format a string for emphasis. | tools/protodoc/protodoc.py | FormatEmph | Gsantomaggio/envoy | python | def FormatEmph(s):
return ('*%s*' % s) |
def FormatFieldType(type_context, field):
'Format a FieldDescriptorProto type description.\n\n Adds cross-refs for message types.\n TODO(htuch): Add cross-refs for enums as well.\n\n Args:\n type_context: contextual information for message/enum/field.\n field: FieldDescriptor proto.\n Return: RST formatted field type.\n '
if (field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith(ENVOY_PREFIX)):
type_name = NormalizeFieldTypeName(field.type_name)
if (field.type == field.TYPE_MESSAGE):
if (type_context.map_typenames and (TypeNameFromFQN(field.type_name) in type_context.map_typenames)):
return ('map<%s, %s>' % tuple(map(functools.partial(FormatFieldType, type_context), type_context.map_typenames[TypeNameFromFQN(field.type_name)])))
return FormatInternalLink(type_name, MessageCrossRefLabel(type_name))
if (field.type == field.TYPE_ENUM):
return FormatInternalLink(type_name, EnumCrossRefLabel(type_name))
elif field.type_name.startswith(WKT_NAMESPACE_PREFIX):
wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):]
return FormatExternalLink(wkt, ('https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' % wkt.lower()))
elif field.type_name.startswith(RPC_NAMESPACE_PREFIX):
rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):]
return FormatExternalLink(rpc, ('https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' % rpc.lower()))
elif field.type_name:
return field.type_name
pretty_type_names = {field.TYPE_DOUBLE: 'double', field.TYPE_FLOAT: 'float', field.TYPE_INT32: 'int32', field.TYPE_SFIXED32: 'int32', field.TYPE_SINT32: 'int32', field.TYPE_FIXED32: 'uint32', field.TYPE_UINT32: 'uint32', field.TYPE_INT64: 'int64', field.TYPE_SFIXED64: 'int64', field.TYPE_SINT64: 'int64', field.TYPE_FIXED64: 'uint64', field.TYPE_UINT64: 'uint64', field.TYPE_BOOL: 'bool', field.TYPE_STRING: 'string', field.TYPE_BYTES: 'bytes'}
if (field.type in pretty_type_names):
return FormatExternalLink(pretty_type_names[field.type], 'https://developers.google.com/protocol-buffers/docs/proto#scalar')
raise ProtodocError(('Unknown field type ' + str(field.type))) | 5,157,686,213,921,965,000 | Format a FieldDescriptorProto type description.
Adds cross-refs for message types.
TODO(htuch): Add cross-refs for enums as well.
Args:
type_context: contextual information for message/enum/field.
field: FieldDescriptor proto.
Return: RST formatted field type. | tools/protodoc/protodoc.py | FormatFieldType | Gsantomaggio/envoy | python | def FormatFieldType(type_context, field):
'Format a FieldDescriptorProto type description.\n\n Adds cross-refs for message types.\n TODO(htuch): Add cross-refs for enums as well.\n\n Args:\n type_context: contextual information for message/enum/field.\n field: FieldDescriptor proto.\n Return: RST formatted field type.\n '
if (field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith(ENVOY_PREFIX)):
type_name = NormalizeFieldTypeName(field.type_name)
if (field.type == field.TYPE_MESSAGE):
if (type_context.map_typenames and (TypeNameFromFQN(field.type_name) in type_context.map_typenames)):
return ('map<%s, %s>' % tuple(map(functools.partial(FormatFieldType, type_context), type_context.map_typenames[TypeNameFromFQN(field.type_name)])))
return FormatInternalLink(type_name, MessageCrossRefLabel(type_name))
if (field.type == field.TYPE_ENUM):
return FormatInternalLink(type_name, EnumCrossRefLabel(type_name))
elif field.type_name.startswith(WKT_NAMESPACE_PREFIX):
wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):]
return FormatExternalLink(wkt, ('https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' % wkt.lower()))
elif field.type_name.startswith(RPC_NAMESPACE_PREFIX):
rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):]
return FormatExternalLink(rpc, ('https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' % rpc.lower()))
elif field.type_name:
return field.type_name
pretty_type_names = {field.TYPE_DOUBLE: 'double', field.TYPE_FLOAT: 'float', field.TYPE_INT32: 'int32', field.TYPE_SFIXED32: 'int32', field.TYPE_SINT32: 'int32', field.TYPE_FIXED32: 'uint32', field.TYPE_UINT32: 'uint32', field.TYPE_INT64: 'int64', field.TYPE_SFIXED64: 'int64', field.TYPE_SINT64: 'int64', field.TYPE_FIXED64: 'uint64', field.TYPE_UINT64: 'uint64', field.TYPE_BOOL: 'bool', field.TYPE_STRING: 'string', field.TYPE_BYTES: 'bytes'}
if (field.type in pretty_type_names):
return FormatExternalLink(pretty_type_names[field.type], 'https://developers.google.com/protocol-buffers/docs/proto#scalar')
raise ProtodocError(('Unknown field type ' + str(field.type))) |
def StripLeadingSpace(s):
'Remove leading space in flat comment strings.'
return MapLines((lambda s: s[1:]), s) | 4,293,869,762,082,076,700 | Remove leading space in flat comment strings. | tools/protodoc/protodoc.py | StripLeadingSpace | Gsantomaggio/envoy | python | def StripLeadingSpace(s):
return MapLines((lambda s: s[1:]), s) |
def FileCrossRefLabel(msg_name):
'File cross reference label.'
return ('envoy_api_file_%s' % msg_name) | -2,295,307,785,000,013,600 | File cross reference label. | tools/protodoc/protodoc.py | FileCrossRefLabel | Gsantomaggio/envoy | python | def FileCrossRefLabel(msg_name):
return ('envoy_api_file_%s' % msg_name) |
def MessageCrossRefLabel(msg_name):
'Message cross reference label.'
return ('envoy_api_msg_%s' % msg_name) | 1,987,750,585,387,631,900 | Message cross reference label. | tools/protodoc/protodoc.py | MessageCrossRefLabel | Gsantomaggio/envoy | python | def MessageCrossRefLabel(msg_name):
return ('envoy_api_msg_%s' % msg_name) |
def EnumCrossRefLabel(enum_name):
'Enum cross reference label.'
return ('envoy_api_enum_%s' % enum_name) | -7,816,271,191,061,070,000 | Enum cross reference label. | tools/protodoc/protodoc.py | EnumCrossRefLabel | Gsantomaggio/envoy | python | def EnumCrossRefLabel(enum_name):
return ('envoy_api_enum_%s' % enum_name) |
def FieldCrossRefLabel(field_name):
'Field cross reference label.'
return ('envoy_api_field_%s' % field_name) | 2,143,585,757,202,854 | Field cross reference label. | tools/protodoc/protodoc.py | FieldCrossRefLabel | Gsantomaggio/envoy | python | def FieldCrossRefLabel(field_name):
return ('envoy_api_field_%s' % field_name) |
def EnumValueCrossRefLabel(enum_value_name):
'Enum value cross reference label.'
return ('envoy_api_enum_value_%s' % enum_value_name) | -1,175,846,287,253,893,400 | Enum value cross reference label. | tools/protodoc/protodoc.py | EnumValueCrossRefLabel | Gsantomaggio/envoy | python | def EnumValueCrossRefLabel(enum_value_name):
return ('envoy_api_enum_value_%s' % enum_value_name) |
def FormatAnchor(label):
'Format a label as an Envoy API RST anchor.'
return ('.. _%s:\n\n' % label) | -4,015,010,840,871,764,500 | Format a label as an Envoy API RST anchor. | tools/protodoc/protodoc.py | FormatAnchor | Gsantomaggio/envoy | python | def FormatAnchor(label):
return ('.. _%s:\n\n' % label) |
def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest):
'Format a FieldDescriptorProto as RST definition list item.\n\n Args:\n outer_type_context: contextual information for enclosing message.\n type_context: contextual information for message/enum/field.\n field: FieldDescriptorProto.\n protodoc_manifest: tools.protodoc.Manifest for proto.\n\n Returns:\n RST formatted definition list item.\n '
field_annotations = []
anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name)))
if field.options.HasExtension(validate_pb2.rules):
rule = field.options.Extensions[validate_pb2.rules]
if ((rule.HasField('message') and rule.message.required) or (rule.HasField('duration') and rule.duration.required) or (rule.HasField('string') and (rule.string.min_len > 0)) or (rule.HasField('string') and (rule.string.min_bytes > 0)) or (rule.HasField('repeated') and (rule.repeated.min_items > 0))):
field_annotations = ['*REQUIRED*']
leading_comment = type_context.leading_comment
formatted_leading_comment = FormatCommentWithAnnotations(leading_comment)
if HideNotImplemented(leading_comment):
return ''
if field.HasField('oneof_index'):
oneof_context = outer_type_context.ExtendOneof(field.oneof_index, type_context.oneof_names[field.oneof_index])
oneof_comment = oneof_context.leading_comment
formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment)
if HideNotImplemented(oneof_comment):
return ''
if ((len(type_context.oneof_fields[field.oneof_index]) == 1) and type_context.oneof_required[field.oneof_index]):
field_annotations = ['*REQUIRED*']
if (len(type_context.oneof_fields[field.oneof_index]) > 1):
field_annotations = []
oneof_template = ('\nPrecisely one of %s must be set.\n' if type_context.oneof_required[field.oneof_index] else '\nOnly one of %s may be set.\n')
formatted_oneof_comment += (oneof_template % ', '.join((FormatInternalLink(f, FieldCrossRefLabel(NormalizeTypeContextName(outer_type_context.ExtendField(i, f).name))) for (i, f) in type_context.oneof_fields[field.oneof_index])))
else:
formatted_oneof_comment = ''
if field.options.HasExtension(security_pb2.security):
manifest_description = protodoc_manifest.fields.get(type_context.name)
if (not manifest_description):
raise ProtodocError(('Missing protodoc manifest YAML for %s' % type_context.name))
formatted_security_options = FormatSecurityOptions(field.options.Extensions[security_pb2.security], field, type_context, manifest_description.edge_config)
else:
formatted_security_options = ''
pretty_label_names = {field.LABEL_OPTIONAL: '', field.LABEL_REPEATED: '**repeated** '}
comment = (('(%s) ' % ', '.join(([(pretty_label_names[field.label] + FormatFieldType(type_context, field))] + field_annotations))) + formatted_leading_comment)
return ((((anchor + field.name) + '\n') + MapLines(functools.partial(Indent, 2), (comment + formatted_oneof_comment))) + formatted_security_options) | -7,184,781,647,353,333,000 | Format a FieldDescriptorProto as RST definition list item.
Args:
outer_type_context: contextual information for enclosing message.
type_context: contextual information for message/enum/field.
field: FieldDescriptorProto.
protodoc_manifest: tools.protodoc.Manifest for proto.
Returns:
RST formatted definition list item. | tools/protodoc/protodoc.py | FormatFieldAsDefinitionListItem | Gsantomaggio/envoy | python | def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest):
'Format a FieldDescriptorProto as RST definition list item.\n\n Args:\n outer_type_context: contextual information for enclosing message.\n type_context: contextual information for message/enum/field.\n field: FieldDescriptorProto.\n protodoc_manifest: tools.protodoc.Manifest for proto.\n\n Returns:\n RST formatted definition list item.\n '
field_annotations = []
anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name)))
if field.options.HasExtension(validate_pb2.rules):
rule = field.options.Extensions[validate_pb2.rules]
if ((rule.HasField('message') and rule.message.required) or (rule.HasField('duration') and rule.duration.required) or (rule.HasField('string') and (rule.string.min_len > 0)) or (rule.HasField('string') and (rule.string.min_bytes > 0)) or (rule.HasField('repeated') and (rule.repeated.min_items > 0))):
field_annotations = ['*REQUIRED*']
leading_comment = type_context.leading_comment
formatted_leading_comment = FormatCommentWithAnnotations(leading_comment)
if HideNotImplemented(leading_comment):
return ''
if field.HasField('oneof_index'):
oneof_context = outer_type_context.ExtendOneof(field.oneof_index, type_context.oneof_names[field.oneof_index])
oneof_comment = oneof_context.leading_comment
formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment)
if HideNotImplemented(oneof_comment):
return ''
if ((len(type_context.oneof_fields[field.oneof_index]) == 1) and type_context.oneof_required[field.oneof_index]):
field_annotations = ['*REQUIRED*']
if (len(type_context.oneof_fields[field.oneof_index]) > 1):
field_annotations = []
oneof_template = ('\nPrecisely one of %s must be set.\n' if type_context.oneof_required[field.oneof_index] else '\nOnly one of %s may be set.\n')
formatted_oneof_comment += (oneof_template % ', '.join((FormatInternalLink(f, FieldCrossRefLabel(NormalizeTypeContextName(outer_type_context.ExtendField(i, f).name))) for (i, f) in type_context.oneof_fields[field.oneof_index])))
else:
formatted_oneof_comment = ''
if field.options.HasExtension(security_pb2.security):
manifest_description = protodoc_manifest.fields.get(type_context.name)
if (not manifest_description):
raise ProtodocError(('Missing protodoc manifest YAML for %s' % type_context.name))
formatted_security_options = FormatSecurityOptions(field.options.Extensions[security_pb2.security], field, type_context, manifest_description.edge_config)
else:
formatted_security_options = ''
pretty_label_names = {field.LABEL_OPTIONAL: '', field.LABEL_REPEATED: '**repeated** '}
comment = (('(%s) ' % ', '.join(([(pretty_label_names[field.label] + FormatFieldType(type_context, field))] + field_annotations))) + formatted_leading_comment)
return ((((anchor + field.name) + '\n') + MapLines(functools.partial(Indent, 2), (comment + formatted_oneof_comment))) + formatted_security_options) |
def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest):
'Format a DescriptorProto as RST definition list.\n\n Args:\n type_context: contextual information for message/enum/field.\n msg: DescriptorProto.\n protodoc_manifest: tools.protodoc.Manifest for proto.\n\n Returns:\n RST formatted definition list item.\n '
type_context.oneof_fields = defaultdict(list)
type_context.oneof_required = defaultdict(bool)
type_context.oneof_names = defaultdict(list)
for (index, field) in enumerate(msg.field):
if field.HasField('oneof_index'):
leading_comment = type_context.ExtendField(index, field.name).leading_comment
if HideNotImplemented(leading_comment):
continue
type_context.oneof_fields[field.oneof_index].append((index, field.name))
for (index, oneof_decl) in enumerate(msg.oneof_decl):
if oneof_decl.options.HasExtension(validate_pb2.required):
type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required]
type_context.oneof_names[index] = oneof_decl.name
return ('\n'.join((FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name), field, protodoc_manifest) for (index, field) in enumerate(msg.field))) + '\n') | 4,753,135,101,386,673,000 | Format a DescriptorProto as RST definition list.
Args:
type_context: contextual information for message/enum/field.
msg: DescriptorProto.
protodoc_manifest: tools.protodoc.Manifest for proto.
Returns:
RST formatted definition list item. | tools/protodoc/protodoc.py | FormatMessageAsDefinitionList | Gsantomaggio/envoy | python | def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest):
'Format a DescriptorProto as RST definition list.\n\n Args:\n type_context: contextual information for message/enum/field.\n msg: DescriptorProto.\n protodoc_manifest: tools.protodoc.Manifest for proto.\n\n Returns:\n RST formatted definition list item.\n '
type_context.oneof_fields = defaultdict(list)
type_context.oneof_required = defaultdict(bool)
type_context.oneof_names = defaultdict(list)
for (index, field) in enumerate(msg.field):
if field.HasField('oneof_index'):
leading_comment = type_context.ExtendField(index, field.name).leading_comment
if HideNotImplemented(leading_comment):
continue
type_context.oneof_fields[field.oneof_index].append((index, field.name))
for (index, oneof_decl) in enumerate(msg.oneof_decl):
if oneof_decl.options.HasExtension(validate_pb2.required):
type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required]
type_context.oneof_names[index] = oneof_decl.name
return ('\n'.join((FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name), field, protodoc_manifest) for (index, field) in enumerate(msg.field))) + '\n') |
def FormatEnumValueAsDefinitionListItem(type_context, enum_value):
'Format a EnumValueDescriptorProto as RST definition list item.\n\n Args:\n type_context: contextual information for message/enum/field.\n enum_value: EnumValueDescriptorProto.\n\n Returns:\n RST formatted definition list item.\n '
anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name)))
default_comment = ('*(DEFAULT)* ' if (enum_value.number == 0) else '')
leading_comment = type_context.leading_comment
formatted_leading_comment = FormatCommentWithAnnotations(leading_comment)
if HideNotImplemented(leading_comment):
return ''
comment = ((default_comment + UNICODE_INVISIBLE_SEPARATOR) + formatted_leading_comment)
return (((anchor + enum_value.name) + '\n') + MapLines(functools.partial(Indent, 2), comment)) | -5,338,880,447,968,518,000 | Format a EnumValueDescriptorProto as RST definition list item.
Args:
type_context: contextual information for message/enum/field.
enum_value: EnumValueDescriptorProto.
Returns:
RST formatted definition list item. | tools/protodoc/protodoc.py | FormatEnumValueAsDefinitionListItem | Gsantomaggio/envoy | python | def FormatEnumValueAsDefinitionListItem(type_context, enum_value):
'Format a EnumValueDescriptorProto as RST definition list item.\n\n Args:\n type_context: contextual information for message/enum/field.\n enum_value: EnumValueDescriptorProto.\n\n Returns:\n RST formatted definition list item.\n '
anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name)))
default_comment = ('*(DEFAULT)* ' if (enum_value.number == 0) else '')
leading_comment = type_context.leading_comment
formatted_leading_comment = FormatCommentWithAnnotations(leading_comment)
if HideNotImplemented(leading_comment):
return ''
comment = ((default_comment + UNICODE_INVISIBLE_SEPARATOR) + formatted_leading_comment)
return (((anchor + enum_value.name) + '\n') + MapLines(functools.partial(Indent, 2), comment)) |
def FormatEnumAsDefinitionList(type_context, enum):
'Format a EnumDescriptorProto as RST definition list.\n\n Args:\n type_context: contextual information for message/enum/field.\n enum: DescriptorProto.\n\n Returns:\n RST formatted definition list item.\n '
return ('\n'.join((FormatEnumValueAsDefinitionListItem(type_context.ExtendEnumValue(index, enum_value.name), enum_value) for (index, enum_value) in enumerate(enum.value))) + '\n') | 2,359,914,086,906,887,000 | Format a EnumDescriptorProto as RST definition list.
Args:
type_context: contextual information for message/enum/field.
enum: DescriptorProto.
Returns:
RST formatted definition list item. | tools/protodoc/protodoc.py | FormatEnumAsDefinitionList | Gsantomaggio/envoy | python | def FormatEnumAsDefinitionList(type_context, enum):
'Format a EnumDescriptorProto as RST definition list.\n\n Args:\n type_context: contextual information for message/enum/field.\n enum: DescriptorProto.\n\n Returns:\n RST formatted definition list item.\n '
return ('\n'.join((FormatEnumValueAsDefinitionListItem(type_context.ExtendEnumValue(index, enum_value.name), enum_value) for (index, enum_value) in enumerate(enum.value))) + '\n') |
def FormatProtoAsBlockComment(proto):
'Format a proto as a RST block comment.\n\n Useful in debugging, not usually referenced.\n '
return (('\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto))) + '\n') | -4,856,622,305,351,986,000 | Format a proto as a RST block comment.
Useful in debugging, not usually referenced. | tools/protodoc/protodoc.py | FormatProtoAsBlockComment | Gsantomaggio/envoy | python | def FormatProtoAsBlockComment(proto):
'Format a proto as a RST block comment.\n\n Useful in debugging, not usually referenced.\n '
return (('\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto))) + '\n') |
def max_positions(self):
'Return None to allow model to dictate max sentence length allowed'
return None | -3,937,559,029,037,020,700 | Return None to allow model to dictate max sentence length allowed | pytorch_translate/tasks/pytorch_translate_multi_task.py | max_positions | Meteorix/translate | python | def max_positions(self):
return None |
def __init__(self, authtoken, authurl, user, key, tenant_name, auth_version, container_name, temp_url_key, temp_url_key2, connection_retry_count, chosen_temp_url_key):
'Init routine.'
self.requests = requests
self.authurl = authurl
self.preauthtoken = authtoken
self.user = user
self.key = key
self.auth_version = auth_version
self.container_name = container_name
self.temp_url_key = temp_url_key
self.temp_url_key_2 = temp_url_key2
self.connection_retry_count = connection_retry_count
self.chosen_temp_url_key = chosen_temp_url_key
self.conn_timeout_sec = 10
self.tenant_name = tenant_name
self.generateToken()
self.updateAccount() | -1,212,660,782,708,376,800 | Init routine. | src/config/fabric-ansible/ansible-playbooks/library/swift_fileutil.py | __init__ | edwinpjacques/contrail-controller | python | def __init__(self, authtoken, authurl, user, key, tenant_name, auth_version, container_name, temp_url_key, temp_url_key2, connection_retry_count, chosen_temp_url_key):
self.requests = requests
self.authurl = authurl
self.preauthtoken = authtoken
self.user = user
self.key = key
self.auth_version = auth_version
self.container_name = container_name
self.temp_url_key = temp_url_key
self.temp_url_key_2 = temp_url_key2
self.connection_retry_count = connection_retry_count
self.chosen_temp_url_key = chosen_temp_url_key
self.conn_timeout_sec = 10
self.tenant_name = tenant_name
self.generateToken()
self.updateAccount() |
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run(self):
'\n run case.#\n :return:\n '
self.common_run(self.testarg) | 784,258,713,962,435,200 | run case.#
:return: | tests/st/ops/ascend/vector/test_expm1_001.py | test_run | mindspore-ai/akg | python | @pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run(self):
'\n run case.#\n :return:\n '
self.common_run(self.testarg) |
def test_run_rpc_cloud(self):
'\n run case.#\n :return:\n '
self.common_run(self.testarg_rpc_cloud) | 3,985,071,734,288,005,600 | run case.#
:return: | tests/st/ops/ascend/vector/test_expm1_001.py | test_run_rpc_cloud | mindspore-ai/akg | python | def test_run_rpc_cloud(self):
'\n run case.#\n :return:\n '
self.common_run(self.testarg_rpc_cloud) |
def teardown(self):
'\n clean environment\n :return:\n '
self._log.info('============= {0} Teardown============'.format(self.casename))
return | -5,374,107,392,887,249,000 | clean environment
:return: | tests/st/ops/ascend/vector/test_expm1_001.py | teardown | mindspore-ai/akg | python | def teardown(self):
'\n clean environment\n :return:\n '
self._log.info('============= {0} Teardown============'.format(self.casename))
return |
async def async_step_user(self, user_input: (dict[(str, Any)] | None)=None) -> FlowResult:
'Handle the initial step.'
errors: dict[(str, str)] = {}
if (self._options is None):
coordinator = (await get_coordinator(self.hass))
if ((not coordinator.last_update_success) or (coordinator.data is None)):
return self.async_abort(reason='cannot_connect')
self._options = {OPTION_WORLDWIDE: 'Worldwide'}
for case in sorted(coordinator.data.values(), key=(lambda case: case.country)):
self._options[case.country] = case.country
if (user_input is not None):
(await self.async_set_unique_id(user_input['country']))
self._abort_if_unique_id_configured()
return self.async_create_entry(title=self._options[user_input['country']], data=user_input)
return self.async_show_form(step_id='user', data_schema=vol.Schema({vol.Required('country'): vol.In(self._options)}), errors=errors) | -8,851,942,273,044,763,000 | Handle the initial step. | homeassistant/components/coronavirus/config_flow.py | async_step_user | bimmbo/core | python | async def async_step_user(self, user_input: (dict[(str, Any)] | None)=None) -> FlowResult:
errors: dict[(str, str)] = {}
if (self._options is None):
coordinator = (await get_coordinator(self.hass))
if ((not coordinator.last_update_success) or (coordinator.data is None)):
return self.async_abort(reason='cannot_connect')
self._options = {OPTION_WORLDWIDE: 'Worldwide'}
for case in sorted(coordinator.data.values(), key=(lambda case: case.country)):
self._options[case.country] = case.country
if (user_input is not None):
(await self.async_set_unique_id(user_input['country']))
self._abort_if_unique_id_configured()
return self.async_create_entry(title=self._options[user_input['country']], data=user_input)
return self.async_show_form(step_id='user', data_schema=vol.Schema({vol.Required('country'): vol.In(self._options)}), errors=errors) |
def get_titletext_for_role(self, role):
'\n Get a short title briefly describing the given ``role``.\n Remember that the role is n Period.\n '
period = role
return period | -5,426,291,248,196,443,000 | Get a short title briefly describing the given ``role``.
Remember that the role is n Period. | devilry/devilry_admin/views/period/crinstance_period.py | get_titletext_for_role | aless80/devilry-django | python | def get_titletext_for_role(self, role):
'\n Get a short title briefly describing the given ``role``.\n Remember that the role is n Period.\n '
period = role
return period |
def get_devilryrole_for_requestuser(self):
'\n Get the devilryrole for the requesting user on the current\n period (request.cradmin_instance).\n\n The return values is the same as for\n :meth:`devilry.devilry_account.models.PeriodPermissionGroupQuerySet.get_devilryrole_for_user_on_period`,\n exept that this method raises ValueError if it does not find a role.\n '
if (not hasattr(self, '_devilryrole_for_requestuser')):
self._devilryrole_for_requestuser = self.__get_devilryrole_for_requestuser()
return self._devilryrole_for_requestuser | 3,701,731,663,110,944,300 | Get the devilryrole for the requesting user on the current
period (request.cradmin_instance).
The return values is the same as for
:meth:`devilry.devilry_account.models.PeriodPermissionGroupQuerySet.get_devilryrole_for_user_on_period`,
exept that this method raises ValueError if it does not find a role. | devilry/devilry_admin/views/period/crinstance_period.py | get_devilryrole_for_requestuser | aless80/devilry-django | python | def get_devilryrole_for_requestuser(self):
'\n Get the devilryrole for the requesting user on the current\n period (request.cradmin_instance).\n\n The return values is the same as for\n :meth:`devilry.devilry_account.models.PeriodPermissionGroupQuerySet.get_devilryrole_for_user_on_period`,\n exept that this method raises ValueError if it does not find a role.\n '
if (not hasattr(self, '_devilryrole_for_requestuser')):
self._devilryrole_for_requestuser = self.__get_devilryrole_for_requestuser()
return self._devilryrole_for_requestuser |
def get_text_index_string(self, text):
'\n Return a string of text containing part-of-speech, lemma pairs.\n '
if (len(text) <= 2):
text_without_punctuation = text.translate(self.punctuation_table)
if (len(text_without_punctuation) >= 1):
text = text_without_punctuation
document = self.nlp(text)
if (len(text) <= 2):
bigram_pairs = [token.lemma_.lower() for token in document]
tokens = [ele for ele in bigram_pairs]
else:
tokens = [token for token in document if (token.is_alpha and (not token.is_stop))]
if (len(tokens) < 2):
tokens = [token for token in document if token.is_alpha]
tokens = [token.lemma_.lower() for token in tokens]
return ' '.join(tokens) | 7,772,530,436,711,282,000 | Return a string of text containing part-of-speech, lemma pairs. | app/chatterbot/tagging.py | get_text_index_string | Jack2313/WeChatterBot | python | def get_text_index_string(self, text):
'\n \n '
if (len(text) <= 2):
text_without_punctuation = text.translate(self.punctuation_table)
if (len(text_without_punctuation) >= 1):
text = text_without_punctuation
document = self.nlp(text)
if (len(text) <= 2):
bigram_pairs = [token.lemma_.lower() for token in document]
tokens = [ele for ele in bigram_pairs]
else:
tokens = [token for token in document if (token.is_alpha and (not token.is_stop))]
if (len(tokens) < 2):
tokens = [token for token in document if token.is_alpha]
tokens = [token.lemma_.lower() for token in tokens]
return ' '.join(tokens) |
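The tagging method in the row above depends on a spaCy pipeline stored in `self.nlp` and a `punctuation_table` built elsewhere in the class. Below is a minimal standalone sketch of the same lemma and stop-word filtering; the `en_core_web_sm` model name and the table construction are assumptions for illustration, not taken from the repository.

```python
# Standalone sketch of the filtering in get_text_index_string; the spaCy model
# name and punctuation_table construction are assumptions for illustration.
import string
import spacy

nlp = spacy.load('en_core_web_sm')  # assumed English model
punctuation_table = str.maketrans(dict.fromkeys(string.punctuation))

def text_index_string(text):
    # Very short inputs: strip punctuation, then lemmatize every token.
    if len(text) <= 2:
        stripped = text.translate(punctuation_table)
        if len(stripped) >= 1:
            text = stripped
    document = nlp(text)
    if len(text) <= 2:
        return ' '.join(token.lemma_.lower() for token in document)
    # Normal inputs: keep alphabetic, non-stop-word tokens (fall back to all
    # alphabetic tokens if fewer than two survive) and join their lemmas.
    tokens = [t for t in document if t.is_alpha and not t.is_stop]
    if len(tokens) < 2:
        tokens = [t for t in document if t.is_alpha]
    return ' '.join(t.lemma_.lower() for t in tokens)

print(text_index_string('What time does the train leave tomorrow?'))
```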
def my_worker_splitter(urls):
'Split urls per worker\n Selects a subset of urls based on Torch get_worker_info.\n Used as a shard selection function in Dataset.\n replaces wds.split_by_worker'
urls = [url for url in urls]
assert isinstance(urls, list)
worker_info = torch.utils.data.get_worker_info()
if (worker_info is not None):
wid = worker_info.id
num_workers = worker_info.num_workers
return urls[wid::num_workers]
else:
return urls | 8,415,511,833,976,402,000 | Split urls per worker
Selects a subset of urls based on Torch get_worker_info.
Used as a shard selection function in Dataset.
replaces wds.split_by_worker | test_train_mp_wds_local.py | my_worker_splitter | mlexample/gcspytorchimagenet | python | def my_worker_splitter(urls):
'Split urls per worker\n Selects a subset of urls based on Torch get_worker_info.\n Used as a shard selection function in Dataset.\n replaces wds.split_by_worker'
urls = [url for url in urls]
assert isinstance(urls, list)
worker_info = torch.utils.data.get_worker_info()
if (worker_info is not None):
wid = worker_info.id
num_workers = worker_info.num_workers
return urls[wid::num_workers]
else:
return urls |
def my_node_splitter(urls):
'Split urls_ correctly per accelerator node\n :param urls:\n :return: slice of urls_\n '
rank = xm.get_ordinal()
num_replicas = xm.xrt_world_size()
urls_this = urls[rank::num_replicas]
return urls_this | -7,302,456,343,695,276,000 | Split urls_ correctly per accelerator node
:param urls:
:return: slice of urls_ | test_train_mp_wds_local.py | my_node_splitter | mlexample/gcspytorchimagenet | python | def my_node_splitter(urls):
'Split urls_ correctly per accelerator node\n :param urls:\n :return: slice of urls_\n '
rank = xm.get_ordinal()
num_replicas = xm.xrt_world_size()
urls_this = urls[rank::num_replicas]
return urls_this |
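Together, `my_node_splitter` and `my_worker_splitter` shard a list of WebDataset .tar URLs first across accelerator nodes (via `xm.get_ordinal`/`xm.xrt_world_size`) and then across DataLoader workers. The standalone sketch below mimics that two-level slicing with plain Python; the shard names and node/worker counts are made-up example values.

```python
# Two-level sharding illustration: slice by node rank, then by worker id.
# Shard names and the node/worker counts are made-up example values.
urls = ['shard-%04d.tar' % i for i in range(8)]

def take_slice(items, index, count):
    return items[index::count]

num_nodes, workers_per_node = 2, 2
for node_rank in range(num_nodes):
    node_urls = take_slice(urls, node_rank, num_nodes)  # my_node_splitter's role
    for worker_id in range(workers_per_node):
        worker_urls = take_slice(node_urls, worker_id, workers_per_node)  # my_worker_splitter's role
        print('node %d worker %d:' % (node_rank, worker_id), worker_urls)
# Every shard lands on exactly one (node, worker) pair, with none duplicated.
```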
def testFieldDescriptionUpdatedEvent(self):
'Test FieldDescriptionUpdatedEvent'
pass | 7,405,285,144,728,888,000 | Test FieldDescriptionUpdatedEvent | python/test/test_field_description_updated_event.py | testFieldDescriptionUpdatedEvent | dlens/dlxapi | python | def testFieldDescriptionUpdatedEvent(self):
pass |
def __attrs_post_init__(self) -> None:
'Register events.'
self.register_event(self.on_selected) | -4,143,778,683,437,158,400 | Register events. | earwax/menus/menu_item.py | __attrs_post_init__ | chrisnorman7/earwax | python | def __attrs_post_init__(self) -> None:
self.register_event(self.on_selected) |
def get_title(self) -> Optional[str]:
'Return the proper title of this object.\n\n If :attr:`self.title <earwax.mixins.TitleMixin.title>` is a callable,\n its return value will be returned.\n '
if callable(self.title):
return self.title()
return self.title | 2,710,188,890,296,733,000 | Return the proper title of this object.
If :attr:`self.title <earwax.mixins.TitleMixin.title>` is a callable,
its return value will be returned. | earwax/menus/menu_item.py | get_title | chrisnorman7/earwax | python | def get_title(self) -> Optional[str]:
'Return the proper title of this object.\n\n If :attr:`self.title <earwax.mixins.TitleMixin.title>` is a callable,\n its return value will be returned.\n '
if callable(self.title):
return self.title()
return self.title |
def on_selected(self) -> None:
'Handle this menu item being selected.'
pass | -2,197,262,279,189,630,500 | Handle this menu item being selected. | earwax/menus/menu_item.py | on_selected | chrisnorman7/earwax | python | def on_selected(self) -> None:
pass |
def all_discrete(comp_dists):
'\n Determine if all distributions in comp_dists are discrete\n '
if isinstance(comp_dists, Distribution):
return isinstance(comp_dists, Discrete)
else:
return all((isinstance(comp_dist, Discrete) for comp_dist in comp_dists)) | -2,878,885,385,310,497,000 | Determine if all distributions in comp_dists are discrete | pymc3/distributions/mixture.py | all_discrete | himkt/pymc3 | python | def all_discrete(comp_dists):
'\n \n '
if isinstance(comp_dists, Distribution):
return isinstance(comp_dists, Discrete)
else:
return all((isinstance(comp_dist, Discrete) for comp_dist in comp_dists)) |
def __init__(self):
'\n todo: this. what fields does it need?\n '
pass | -683,415,658,845,593,700 | todo: this. what fields does it need? | src/models/Massey.py | __init__ | alhart2015/march-madness | python | def __init__(self):
'\n \n '
pass |
def rank(self) -> List[Team]:
'\n Given a matrix, create a power ranking of the teams\n '
pass | 1,653,216,557,902,713,300 | Given a matrix, create a power ranking of the teams | src/models/Massey.py | rank | alhart2015/march-madness | python | def rank(self) -> List[Team]:
'\n \n '
pass |
def predict_bracket(self) -> Bracket:
'\n Given a ranking of the teams, and the draw for the bracket, predict who wins and stuff\n '
pass | 2,216,859,857,913,330,200 | Given a ranking of the teams, and the draw for the bracket, predict who wins and stuff | src/models/Massey.py | predict_bracket | alhart2015/march-madness | python | def predict_bracket(self) -> Bracket:
'\n \n '
pass |
@staticmethod
def from_file(filename: str) -> Massey:
'\n todo: docs\n todo: weighting param?\n\n parse teams and games from file\n create matrix from teams and games\n '
pass | 4,295,603,889,069,199,400 | todo: docs
todo: weighting param?
parse teams and games from file
create matrix from teams and games | src/models/Massey.py | from_file | alhart2015/march-madness | python | @staticmethod
def from_file(filename: str) -> Massey:
'\n todo: docs\n todo: weighting param?\n\n parse teams and games from file\n create matrix from teams and games\n '
pass |
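The Massey class above leaves the ranking math as a todo. For orientation, here is a hedged sketch of the classic Massey least-squares rating (not code from this repository): each game adds a row with +1 for the winner and -1 for the loser, the right-hand side is the point margin, and the singular normal equations are pinned down by forcing the ratings to sum to zero. The team names and scores are made up.

```python
# Hedged sketch of classic Massey ratings; not taken from src/models/Massey.py.
import numpy as np

teams = ['A', 'B', 'C']
games = [('A', 'B', 7), ('B', 'C', 3), ('A', 'C', 10)]  # (winner, loser, margin)
idx = {t: i for i, t in enumerate(teams)}

X = np.zeros((len(games), len(teams)))
y = np.zeros(len(games))
for g, (winner, loser, margin) in enumerate(games):
    X[g, idx[winner]], X[g, idx[loser]] = 1.0, -1.0
    y[g] = margin

M = X.T @ X          # Massey matrix (singular on its own)
p = X.T @ y
M[-1, :] = 1.0       # replace the last equation: ratings must sum to zero
p[-1] = 0.0
ratings = np.linalg.solve(M, p)
for team in sorted(teams, key=lambda t: -ratings[idx[t]]):
    print(team, round(float(ratings[idx[team]]), 2))
```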
def linear_warmup_and_cosine_protocol(f_values: Tuple[(float, float, float)], x_milestones: Tuple[(int, int, int, int)]):
'\n There are 5 regions:\n 1. constant at f0 for x < x0\n 2. linear increase from f0 to f1 for x0 < x < x1\n 3. constant at f1 for x1 < x < x2\n 4. cosine protocol from f1 to f2 for x2 < x < x3\n 5. constant at f2 for x > x3\n\n If you want a linear_ramp followed by a cosine_decay only simply set:\n 1. x0=0 (to eliminate the first constant piece)\n 2. x2=x1 (to eliminate the second constant piece)\n 3. max_epochs=x3 (to make the simulation stop after the linear or cosine decay)\n '
assert (x_milestones[0] <= x_milestones[1] <= x_milestones[2] <= x_milestones[3])
def fn(step):
if (step <= x_milestones[0]):
return float(f_values[0])
elif ((step > x_milestones[0]) and (step <= x_milestones[1])):
m = (float((f_values[1] - f_values[0])) / float(max(1, (x_milestones[1] - x_milestones[0]))))
return (float(f_values[0]) + (m * float((step - x_milestones[0]))))
elif ((step > x_milestones[1]) and (step <= x_milestones[2])):
return float(f_values[1])
elif ((step > x_milestones[2]) and (step <= x_milestones[3])):
progress = (float((step - x_milestones[2])) / float(max(1, (x_milestones[3] - x_milestones[2]))))
tmp = (0.5 * (1.0 + math.cos((math.pi * progress))))
return (float(f_values[2]) + (tmp * float((f_values[1] - f_values[2]))))
else:
return float(f_values[2])
return fn | 5,977,864,187,232,458,000 | There are 5 regions:
1. constant at f0 for x < x0
2. linear increase from f0 to f1 for x0 < x < x1
3. constant at f1 for x1 < x < x2
4. cosine protocol from f1 to f2 for x2 < x < x3
5. constant at f2 for x > x3
If you want a linear_ramp followed by a cosine_decay only simply set:
1. x0=0 (to eliminate the first constant piece)
2. x2=x1 (to eliminate the second constant piece)
3. max_epochs=x3 (to make the simulation stop after the linear or cosine decay) | src/tissue_purifier/models/_optim_scheduler.py | linear_warmup_and_cosine_protocol | broadinstitute/tissue_purifier | python | def linear_warmup_and_cosine_protocol(f_values: Tuple[(float, float, float)], x_milestones: Tuple[(int, int, int, int)]):
'\n There are 5 regions:\n 1. constant at f0 for x < x0\n 2. linear increase from f0 to f1 for x0 < x < x1\n 3. constant at f1 for x1 < x < x2\n 4. cosine protocol from f1 to f2 for x2 < x < x3\n 5. constant at f2 for x > x3\n\n If you want a linear_ramp followed by a cosine_decay only simply set:\n 1. x0=0 (to eliminate the first constant piece)\n 2. x2=x1 (to eliminate the second constant piece)\n 3. max_epochs=x3 (to make the simulation stop after the linear or cosine decay)\n '
assert (x_milestones[0] <= x_milestones[1] <= x_milestones[2] <= x_milestones[3])
def fn(step):
if (step <= x_milestones[0]):
return float(f_values[0])
elif ((step > x_milestones[0]) and (step <= x_milestones[1])):
m = (float((f_values[1] - f_values[0])) / float(max(1, (x_milestones[1] - x_milestones[0]))))
return (float(f_values[0]) + (m * float((step - x_milestones[0]))))
elif ((step > x_milestones[1]) and (step <= x_milestones[2])):
return float(f_values[1])
elif ((step > x_milestones[2]) and (step <= x_milestones[3])):
progress = (float((step - x_milestones[2])) / float(max(1, (x_milestones[3] - x_milestones[2]))))
tmp = (0.5 * (1.0 + math.cos((math.pi * progress))))
return (float(f_values[2]) + (tmp * float((f_values[1] - f_values[2]))))
else:
return float(f_values[2])
return fn |
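A quick usage sketch of the factory above, assuming the function from this row is in scope; the milestone and value choices are arbitrary illustration, with a linear warmup from step 0 to 10 followed by a cosine decay to step 100.

```python
# Evaluate the returned schedule at a few steps; values and milestones are arbitrary.
lr_fn = linear_warmup_and_cosine_protocol(
    f_values=(0.0, 1.0, 0.1),        # start, peak, final
    x_milestones=(0, 10, 10, 100),   # warmup 0 -> 10, cosine decay 10 -> 100
)
for step in (0, 5, 10, 55, 100, 150):
    print(step, round(lr_fn(step), 3))
# Prints 0.0, 0.5, 1.0, 0.55, 0.1, 0.1: a linear ramp, then a cosine decay,
# then constant at the final value.
```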
@torch.no_grad()
def step(self, closure=None):
'Performs a single optimization step.\n Args:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n '
loss = None
if (closure is not None):
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
if (p.grad is None):
continue
d_p = p.grad
p_norm = torch.norm(p.data)
g_norm = torch.norm(p.grad.data)
if (weight_decay != 0):
if ((p_norm != 0) and (g_norm != 0)):
lars_lr = (p_norm / ((g_norm + (p_norm * weight_decay)) + group['eps']))
lars_lr *= group['trust_coefficient']
d_p = d_p.add(p, alpha=weight_decay)
d_p *= lars_lr
if (momentum != 0):
param_state = self.state[p]
if ('momentum_buffer' not in param_state):
buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(d_p, alpha=(1 - dampening))
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
p.add_(d_p, alpha=(- group['lr']))
return loss | -4,350,163,514,070,495,000 | Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss. | src/tissue_purifier/models/_optim_scheduler.py | step | broadinstitute/tissue_purifier | python | @torch.no_grad()
def step(self, closure=None):
'Performs a single optimization step.\n Args:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n '
loss = None
if (closure is not None):
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
if (p.grad is None):
continue
d_p = p.grad
p_norm = torch.norm(p.data)
g_norm = torch.norm(p.grad.data)
if (weight_decay != 0):
if ((p_norm != 0) and (g_norm != 0)):
lars_lr = (p_norm / ((g_norm + (p_norm * weight_decay)) + group['eps']))
lars_lr *= group['trust_coefficient']
d_p = d_p.add(p, alpha=weight_decay)
d_p *= lars_lr
if (momentum != 0):
param_state = self.state[p]
if ('momentum_buffer' not in param_state):
buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(d_p, alpha=(1 - dampening))
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
p.add_(d_p, alpha=(- group['lr']))
return loss |
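The `step` above rescales each parameter's update by a layer-wise trust ratio before momentum is applied. A standalone illustration of that scaling on toy tensors follows; the hyperparameter values are assumptions chosen only to show the formula, not defaults read from the class.

```python
# The trust ratio step() computes per parameter, shown on toy tensors:
#   lars_lr = ||w|| / (||g|| + weight_decay * ||w|| + eps) * trust_coefficient
import torch

w = torch.randn(64, 64)            # a parameter tensor
g = 0.01 * torch.randn(64, 64)     # its gradient
weight_decay, trust_coefficient, eps = 1e-6, 0.001, 1e-8

p_norm, g_norm = w.norm(), g.norm()
lars_lr = p_norm / (g_norm + p_norm * weight_decay + eps) * trust_coefficient
update = (g + weight_decay * w) * lars_lr   # what step() feeds into the momentum buffer
print(float(lars_lr), float(update.norm()))
```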
def self_generate(output_filename, filename='iso3166-1.csv'):
'\n The following code can be used for self-generation of this file.\n\n It requires a UTF-8 CSV file containing the short ISO name and two letter\n country code as the first two columns.\n '
import csv
import re
countries = []
alt_codes = []
with open(filename, 'rb') as csv_file:
for row in csv.reader(csv_file):
name = row[0].decode('utf-8').rstrip('*')
name = re.sub('\\(the\\)', '', name)
if name:
countries.append((name, row[1].decode('utf-8')))
alt_codes.append((row[1].decode('utf-8'), row[2].decode('utf-8'), int(row[3])))
with open(__file__, 'r') as source_file:
contents = source_file.read()
bits = re.match('(.*\nCOUNTRIES = \\{\n)(.*?)(\n\\}.*)', contents, re.DOTALL).groups()
country_list = []
for (name, code) in countries:
name = name.replace('"', '\\"').strip()
country_list.append(' "{code}": _("{name}"),'.format(name=name, code=code))
content = bits[0]
content += '\n'.join(country_list).encode('utf-8')
alt_bits = re.match('(.*\nALT_CODES = \\{\n)(.*)(\n\\}.*)', bits[2], re.DOTALL).groups()
alt_list = []
for (code, code3, codenum) in alt_codes:
name = name.replace('"', '\\"').strip()
alt_list.append(' "{code}": ("{code3}", {codenum}),'.format(code=code, code3=code3, codenum=codenum))
content += alt_bits[0]
content += '\n'.join(alt_list).encode('utf-8')
content += alt_bits[2]
with open(output_filename, 'wb') as output_file:
output_file.write(content)
return countries | 7,161,760,376,718,209,000 | The following code can be used for self-generation of this file.
It requires a UTF-8 CSV file containing the short ISO name and two letter
country code as the first two columns. | django_countries/data.py | self_generate | Bounder/django-countries | python | def self_generate(output_filename, filename='iso3166-1.csv'):
'\n The following code can be used for self-generation of this file.\n\n It requires a UTF-8 CSV file containing the short ISO name and two letter\n country code as the first two columns.\n '
import csv
import re
countries = []
alt_codes = []
with open(filename, 'rb') as csv_file:
for row in csv.reader(csv_file):
name = row[0].decode('utf-8').rstrip('*')
name = re.sub('\\(the\\)', '', name)
if name:
countries.append((name, row[1].decode('utf-8')))
alt_codes.append((row[1].decode('utf-8'), row[2].decode('utf-8'), int(row[3])))
with open(__file__, 'r') as source_file:
contents = source_file.read()
bits = re.match('(.*\nCOUNTRIES = \\{\n)(.*?)(\n\\}.*)', contents, re.DOTALL).groups()
country_list = []
for (name, code) in countries:
name = name.replace('"', '\\"').strip()
country_list.append(' "{code}": _("{name}"),'.format(name=name, code=code))
content = bits[0]
content += '\n'.join(country_list).encode('utf-8')
alt_bits = re.match('(.*\nALT_CODES = \\{\n)(.*)(\n\\}.*)', bits[2], re.DOTALL).groups()
alt_list = []
for (code, code3, codenum) in alt_codes:
name = name.replace('"', '\\"').strip()
alt_list.append(' "{code}": ("{code3}", {codenum}),'.format(code=code, code3=code3, codenum=codenum))
content += alt_bits[0]
content += '\n'.join(alt_list).encode('utf-8')
content += alt_bits[2]
with open(output_filename, 'wb') as output_file:
output_file.write(content)
return countries |
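As the docstring notes, the helper expects a UTF-8 CSV whose columns are the short ISO name, the alpha-2 code, the alpha-3 code, and the numeric code, read as row[0] through row[3]. The snippet below shows illustrative rows in that layout (real ISO codes, but not an excerpt of the actual file) and how the loop would unpack them.

```python
# Illustrative iso3166-1.csv rows in the layout self_generate reads
# (name, alpha-2, alpha-3, numeric); not an excerpt of the real file.
import csv
import io

sample = """Andorra,AD,AND,20
United Arab Emirates (the),AE,ARE,784
Afghanistan,AF,AFG,4
"""
for name, alpha2, alpha3, numeric in csv.reader(io.StringIO(sample)):
    print(name.rstrip('*'), alpha2, alpha3, int(numeric))
```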
def test_positive_without_instance_attr(self):
' Test if the target class without a singleton attribute. '
try:
@singleton
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
pass
self.assertTrue(True, 'Singleton Class: Passed the initialization as expected.')
except SingletonInitializationException:
self.assertTrue(False, 'Singleton Class: Failed the initialization with known exception.')
self.assertIsInstance(SuperDummyClass.instance(), SuperDummyClass)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 1)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 2) | -2,292,913,109,432,330,800 | Test if the target class without a singleton attribute. | test/ut/test_decorator_common_singleton.py | test_positive_without_instance_attr | shiroyuki/Tori | python | def test_positive_without_instance_attr(self):
' '
try:
@singleton
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
pass
self.assertTrue(True, 'Singleton Class: Passed the initialization as expected.')
except SingletonInitializationException:
self.assertTrue(False, 'Singleton Class: Failed the initialization with known exception.')
self.assertIsInstance(SuperDummyClass.instance(), SuperDummyClass)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 1)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 2) |
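The singleton tests in these rows exercise a decorator that instantiates the class once, exposes the result through a classmethod `instance()`, accepts optional constructor arguments, and raises `SingletonInitializationException` when the class already defines `_singleton_instance`. The sketch below is a minimal reconstruction of that contract for orientation only; it is not Tori's actual implementation and does not cover `singleton_with` or every negative case.

```python
# Minimal reconstruction of the contract the tests exercise; NOT Tori's code.
class SingletonInitializationException(Exception):
    pass

def singleton(*args, **kwargs):
    def decorate(cls, init_args=(), init_kwargs=None):
        if hasattr(cls, '_singleton_instance'):
            # Classes that already carry the attribute are rejected.
            raise SingletonInitializationException(
                'The class already has a singleton attribute.')
        cls._singleton_instance = cls(*init_args, **(init_kwargs or {}))
        cls.instance = classmethod(lambda c: c._singleton_instance)
        return cls
    # Bare usage: @singleton
    if len(args) == 1 and not kwargs and isinstance(args[0], type):
        return decorate(args[0])
    # Parameterised usage: @singleton(arg, ...)
    return lambda cls: decorate(cls, args, kwargs)
```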
def test_positive_using_decorator_with_primitive_parameters(self):
' Test if the target class without a singleton attribute but using a decorator with primitive parameters. '
try:
@singleton(10)
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
def __init__(self, init_number):
super(self.__class__, self).__init__()
self.number = init_number
self.assertTrue(True, 'Singleton Class: Passed the initialization as expected.')
except SingletonInitializationException:
self.assertTrue(False, 'Singleton Class: Failed the initialization with known exception.')
self.assertIsInstance(SuperDummyClass.instance(), SuperDummyClass)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 11)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 12) | 377,191,850,248,781,300 | Test if the target class without a singleton attribute but using a decorator with primitive parameters. | test/ut/test_decorator_common_singleton.py | test_positive_using_decorator_with_primitive_parameters | shiroyuki/Tori | python | def test_positive_using_decorator_with_primitive_parameters(self):
' '
try:
@singleton(10)
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
def __init__(self, init_number):
super(self.__class__, self).__init__()
self.number = init_number
self.assertTrue(True, 'Singleton Class: Passed the initialization as expected.')
except SingletonInitializationException:
self.assertTrue(False, 'Singleton Class: Failed the initialization with known exception.')
self.assertIsInstance(SuperDummyClass.instance(), SuperDummyClass)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 11)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 12) |
def test_positive_for_normal_singleton_with_parameters(self):
' Positive test for @singleton with parameters provided for the constructor '
try:
class SampleDependencyInjection(object):
pass
sample_di = SampleDependencyInjection()
@singleton(sample_di)
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
def __init__(self, dependency_injection):
super(self.__class__, self).__init__()
self.dependency_injection = dependency_injection
self.assertTrue(True, 'Singleton Class: Passed the initialization as expected.')
except SingletonInitializationException:
self.assertTrue(False, 'Singleton Class: Failed the initialization with known exception.')
self.assertIsInstance(SuperDummyClass.instance(), SuperDummyClass)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 1)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 2)
self.assertIsInstance(SuperDummyClass.instance().dependency_injection, SampleDependencyInjection) | 3,776,731,983,615,224,300 | Positive test for @singleton with parameters provided for the constructor | test/ut/test_decorator_common_singleton.py | test_positive_for_normal_singleton_with_parameters | shiroyuki/Tori | python | def test_positive_for_normal_singleton_with_parameters(self):
' '
try:
class SampleDependencyInjection(object):
pass
sample_di = SampleDependencyInjection()
@singleton(sample_di)
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
def __init__(self, dependency_injection):
super(self.__class__, self).__init__()
self.dependency_injection = dependency_injection
self.assertTrue(True, 'Singleton Class: Passed the initialization as expected.')
except SingletonInitializationException:
self.assertTrue(False, 'Singleton Class: Failed the initialization with known exception.')
self.assertIsInstance(SuperDummyClass.instance(), SuperDummyClass)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 1)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 2)
self.assertIsInstance(SuperDummyClass.instance().dependency_injection, SampleDependencyInjection) |
def test_negative_for_normal_singleton_with_class_reference(self):
' Negative test for @singleton with class_reference provided for the constructor '
try:
class SampleDependencyInjection(object):
pass
@singleton(SampleDependencyInjection)
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
def __init__(self, dependency_injection):
super(self.__class__, self).__init__()
self.dependency_injection = dependency_injection
self.assertTrue(False, 'Singleton Class: Passed the initialization as expected.')
except SingletonInitializationException:
self.assertTrue(False, 'Singleton Class: Failed the initialization with known-yet-unexpected exception.')
except TypeError:
self.assertTrue(True, 'Singleton Class: Failed the initialization with expected exception.') | -1,487,090,186,829,476,600 | Negative test for @singleton with class_reference provided for the constructor | test/ut/test_decorator_common_singleton.py | test_negative_for_normal_singleton_with_class_reference | shiroyuki/Tori | python | def test_negative_for_normal_singleton_with_class_reference(self):
' '
try:
class SampleDependencyInjection(object):
pass
@singleton(SampleDependencyInjection)
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
def __init__(self, dependency_injection):
super(self.__class__, self).__init__()
self.dependency_injection = dependency_injection
self.assertTrue(False, 'Singleton Class: Passed the initialization as expected.')
except SingletonInitializationException:
self.assertTrue(False, 'Singleton Class: Failed the initialization with known-yet-unexpected exception.')
except TypeError:
self.assertTrue(True, 'Singleton Class: Failed the initialization with expected exception.') |
def test_positive_for_singleton_with(self):
' Positive test for @singleton_with(*args, **kwargs) '
try:
class SampleDependencyInjection(object):
pass
@singleton_with(SampleDependencyInjection)
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
def __init__(self, dependency_injection):
super(self.__class__, self).__init__()
self.dependency_injection = dependency_injection()
self.assertTrue(True, 'Singleton Class: Passed the initialization as expected.')
except SingletonInitializationException:
self.assertTrue(False, 'Singleton Class: Failed the initialization with known exception.')
self.assertIsInstance(SuperDummyClass.instance(), SuperDummyClass)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 1)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 2)
self.assertIsInstance(SuperDummyClass.instance().dependency_injection, SampleDependencyInjection) | -7,476,538,399,266,750,000 | Positive test for @singleton_with(*args, **kwargs) | test/ut/test_decorator_common_singleton.py | test_positive_for_singleton_with | shiroyuki/Tori | python | def test_positive_for_singleton_with(self):
' '
try:
class SampleDependencyInjection(object):
pass
@singleton_with(SampleDependencyInjection)
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
def __init__(self, dependency_injection):
super(self.__class__, self).__init__()
self.dependency_injection = dependency_injection()
self.assertTrue(True, 'Singleton Class: Passed the initialization as expected.')
except SingletonInitializationException:
self.assertTrue(False, 'Singleton Class: Failed the initialization with known exception.')
self.assertIsInstance(SuperDummyClass.instance(), SuperDummyClass)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 1)
SuperDummyClass.instance().take_action()
self.assertEqual(SuperDummyClass.instance().get_number(), 2)
self.assertIsInstance(SuperDummyClass.instance().dependency_injection, SampleDependencyInjection) |
def test_negative_with_existed_singleton_instance(self):
' Test if the target class is with null singleton attribute. '
try:
@singleton
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
_singleton_instance = None
def __init__(self):
super(self.__class__, self).__init__()
self.assertTrue(False, 'Singleton Class: Passed the initialization unexpectedly.')
except SingletonInitializationException:
self.assertTrue(True, 'Singleton Class: Failed the initialization with expected exception.') | 7,762,687,949,380,224,000 | Test if the target class is with null singleton attribute. | test/ut/test_decorator_common_singleton.py | test_negative_with_existed_singleton_instance | shiroyuki/Tori | python | def test_negative_with_existed_singleton_instance(self):
' '
try:
@singleton
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
_singleton_instance = None
def __init__(self):
super(self.__class__, self).__init__()
self.assertTrue(False, 'Singleton Class: Passed the initialization unexpectedly.')
except SingletonInitializationException:
self.assertTrue(True, 'Singleton Class: Failed the initialization with expected exception.') |
def test_negative_with_unexpected_instance_attr(self):
' Test if the target class has already had an attribute `_singleton_instance` but it is not compatible. '
try:
@singleton
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
_singleton_instance = {}
def __init__(self):
super(self.__class__, self).__init__()
self.assertTrue(False, 'Singleton Class: Passed the initialization unexpectedly.')
except SingletonInitializationException:
self.assertTrue(True, 'Singleton Class: Failed the initialization with expected exception.') | -3,037,646,351,450,630,000 | Test if the target class has already had an attribute `_singleton_instance` but it is not compatible. | test/ut/test_decorator_common_singleton.py | test_negative_with_unexpected_instance_attr | shiroyuki/Tori | python | def test_negative_with_unexpected_instance_attr(self):
' '
try:
@singleton
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
_singleton_instance = {}
def __init__(self):
super(self.__class__, self).__init__()
self.assertTrue(False, 'Singleton Class: Passed the initialization unexpectedly.')
except SingletonInitializationException:
self.assertTrue(True, 'Singleton Class: Failed the initialization with expected exception.') |
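
The three test records above exercise Tori's @singleton and @singleton_with decorators: a decorated class gains a shared instance reachable through ClassName.instance(), decoration fails with SingletonInitializationException when the class already defines _singleton_instance, and @singleton_with forwards its arguments to the constructor on first use. The following is not Tori's implementation, only a minimal sketch of the behaviour these tests assume, with the exception name and attribute name taken from the tests.

class SingletonInitializationException(Exception):
    # Raised when a decorated class already carries a _singleton_instance attribute.
    pass

def singleton_with(*args, **kwargs):
    # Class decorator factory: the arguments given here are forwarded to the
    # class constructor the first time .instance() is called.
    def decorator(cls):
        if '_singleton_instance' in cls.__dict__:
            # The tests above expect any pre-existing attribute to be rejected.
            raise SingletonInitializationException(cls.__name__)
        cls._singleton_instance = None

        @classmethod
        def instance(target_cls):
            if target_cls._singleton_instance is None:
                target_cls._singleton_instance = target_cls(*args, **kwargs)
            return target_cls._singleton_instance

        cls.instance = instance
        return cls
    return decorator

def singleton(cls):
    # @singleton is the zero-argument form of @singleton_with.
    return singleton_with()(cls)
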
def match(given, definition):
'Returns true if a given argument matches the definition.'
if isinstance(definition, Variable):
return True
return (definition == given) | 758,407,112,162,061,200 | Returns true if a given argument matches the definition. | lib/statement.py | match | bendmorris/beaver | python | def match(given, definition):
if isinstance(definition, Variable):
return True
return (definition == given) |
def replace(self, *varsets):
'Checks each part of the statement against defined variables. If any\n matches are found, the statement is updated. If the statement is a function\n call, a new set of statements is returned; otherwise, None is returned.'
matched = False
subj = self.subject
if isinstance(subj, Variable):
(result, new_match) = def_match(subj, varsets)
if result:
self.subject = new_match
return self.replace(*varsets)
for (n, (verb, objects)) in enumerate(self.verb_objects):
if isinstance(verb, Variable):
(result, new_match) = def_match(verb, varsets)
if result:
(v, o) = self.verb_objects[n]
self.verb_objects[n] = (new_match, o)
return self.replace(*varsets)
for (m, obj) in enumerate(objects):
if isinstance(obj, Variable):
(result, new_match) = def_match(obj, varsets)
if result:
objects[m] = new_match
return self.replace(*varsets)
return None | 1,030,962,870,911,389,000 | Checks each part of the statement against defined variables. If any
matches are found, the statement is updated. If the statement is a function
call, a new set of statements is returned; otherwise, None is returned. | lib/statement.py | replace | bendmorris/beaver | python | def replace(self, *varsets):
'Checks each part of the statement against defined variables. If any\n matches are found, the statement is updated. If the statement is a function\n call, a new set of statements is returned; otherwise, None is returned.'
matched = False
subj = self.subject
if isinstance(subj, Variable):
(result, new_match) = def_match(subj, varsets)
if result:
self.subject = new_match
return self.replace(*varsets)
for (n, (verb, objects)) in enumerate(self.verb_objects):
if isinstance(verb, Variable):
(result, new_match) = def_match(verb, varsets)
if result:
(v, o) = self.verb_objects[n]
self.verb_objects[n] = (new_match, o)
return self.replace(*varsets)
for (m, obj) in enumerate(objects):
if isinstance(obj, Variable):
(result, new_match) = def_match(obj, varsets)
if result:
objects[m] = new_match
return self.replace(*varsets)
return None |
def __init__(self, id, full_shard_id_list, root_tip):
' Empty full_shard_id_list means root '
if isinstance(id, bytes):
self.id = id
else:
self.id = bytes(id, 'ascii')
self.full_shard_id_list = full_shard_id_list
self.root_tip = root_tip | 3,254,462,930,744,925,700 | Empty full_shard_id_list means root | quarkchain/cluster/rpc.py | __init__ | QuarkChain/pyquarkchain | python | def __init__(self, id, full_shard_id_list, root_tip):
' '
if isinstance(id, bytes):
self.id = id
else:
self.id = bytes(id, 'ascii')
self.full_shard_id_list = full_shard_id_list
self.root_tip = root_tip |
def __init__(self, id, full_shard_id_list):
' Empty slave_id and full_shard_id_list means root '
if isinstance(id, bytes):
self.id = id
else:
self.id = bytes(id, 'ascii')
self.full_shard_id_list = full_shard_id_list | 5,757,208,166,312,482,000 | Empty slave_id and full_shard_id_list means root | quarkchain/cluster/rpc.py | __init__ | QuarkChain/pyquarkchain | python | def __init__(self, id, full_shard_id_list):
' '
if isinstance(id, bytes):
self.id = id
else:
self.id = bytes(id, 'ascii')
self.full_shard_id_list = full_shard_id_list |
def fitnessFunction(chromosome):
'\n Given a "chromosome", this function must determine its fitness score\n The fitness score should be a floating point value. If the fitness is zero or smaller\n then the chromosome will not be allowed to "reproduce"\n '
a = chromosome['a']
b = chromosome['b']
c = chromosome['c']
d = chromosome['d']
val1 = math.fabs(((((a + b) + c) + d) - 17))
val2 = math.fabs(((math.pow(a, 2) + math.pow(b, 2)) - 5))
val3 = (((math.sin(a) + c) - d) - 20)
dist = math.sqrt(((math.pow(val1, 2) + math.pow(val2, 2)) + math.pow(val3, 2)))
if (dist != 0):
return (1 / dist)
else:
return None | 302,749,153,459,521,800 | Given a "chromosome", this function must determine its fitness score
The fitness score should be a floating point value. If the fitness is zero or smaller
then the chromosome will not be allowed to "reproduce" | examples/EquationSolver_simple.py | fitnessFunction | littley/pyvolution | python | def fitnessFunction(chromosome):
'\n Given a "chromosome", this function must determine its fitness score\n The fitness score should be a floating point value. If the fitness is zero or smaller\n then the chromosome will not be allowed to "reproduce"\n '
a = chromosome['a']
b = chromosome['b']
c = chromosome['c']
d = chromosome['d']
val1 = math.fabs(((((a + b) + c) + d) - 17))
val2 = math.fabs(((math.pow(a, 2) + math.pow(b, 2)) - 5))
val3 = (((math.sin(a) + c) - d) - 20)
dist = math.sqrt(((math.pow(val1, 2) + math.pow(val2, 2)) + math.pow(val3, 2)))
if (dist != 0):
return (1 / dist)
else:
return None |
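
fitnessFunction treats a chromosome (a, b, c, d) as a candidate root of the system a+b+c+d = 17, a^2+b^2 = 5, sin(a)+c-d = 20, and scores it as the reciprocal of the Euclidean norm of the residual vector (an exact root would make the distance zero, which the original returns as None). A small worked check using the same residuals:

import math

def residuals(a, b, c, d):
    # Residuals of the three target equations used by fitnessFunction.
    return (
        abs(a + b + c + d - 17),
        abs(a ** 2 + b ** 2 - 5),
        math.sin(a) + c - d - 20,   # note: not wrapped in abs() in the original
    )

def fitness(a, b, c, d):
    dist = math.sqrt(sum(r ** 2 for r in residuals(a, b, c, d)))
    return 1 / dist if dist else None

# Example chromosome {a: 1, b: 2, c: 19, d: -5}:
# residuals -> (0, 0, sin(1) + 4) ~= (0, 0, 4.84), so fitness ~= 0.21
print(fitness(1.0, 2.0, 19.0, -5.0))
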
def equation_checker(equation):
'\n Check equation for balanced parentheses\n\n Args:\n equation(string): String form of equation\n Returns:\n bool: Return if parentheses are balanced or not\n '
opening_parenthesis = Stack()
closing_parenthesis = Stack()
for _ in equation:
if (_ == '('):
opening_parenthesis.push(_)
elif (_ == ')'):
closing_parenthesis.push(_)
return (opening_parenthesis.size() == closing_parenthesis.size()) | 4,243,911,488,360,488,000 | Check equation for balanced parentheses
Args:
equation(string): String form of equation
Returns:
bool: Return if parentheses are balanced or not | 3. data_structures/stack/balanced_parantheses.py | equation_checker | m-01101101/udacity-datastructures-algorithms | python | def equation_checker(equation):
'\n Check equation for balanced parentheses\n\n Args:\n equation(string): String form of equation\n Returns:\n bool: Return if parentheses are balanced or not\n '
opening_parenthesis = Stack()
closing_parenthesis = Stack()
for _ in equation:
if (_ == '('):
opening_parenthesis.push(_)
elif (_ == ')'):
closing_parenthesis.push(_)
return (opening_parenthesis.size() == closing_parenthesis.size()) |
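
Note that equation_checker pushes opening and closing parentheses onto two separate stacks and only compares their sizes, so a string such as ")(" is reported as balanced even though the ordering is wrong. A stricter check needs a single stack: push on "(", pop on ")", and fail on an early pop or on leftovers. A sketch, independent of the Stack class used above:

def is_balanced(equation: str) -> bool:
    # Return True only if every ')' closes a previously seen '('.
    depth = []  # plain list used as a stack of open parentheses
    for ch in equation:
        if ch == '(':
            depth.append(ch)
        elif ch == ')':
            if not depth:        # closing with nothing open -> unbalanced
                return False
            depth.pop()
    return not depth             # anything left open is also unbalanced

print(is_balanced("((3^2 + 8)*(5/2))/(2+6)"))  # True
print(is_balanced(")("))                        # False, unlike the count-only check
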
def _equation_checker(equation):
'\n Check equation for balanced parentheses\n '
return (equation.count('(') == equation.count(')')) | -2,766,931,488,167,891,000 | Check equation for balanced parentheses | 3. data_structures/stack/balanced_parantheses.py | _equation_checker | m-01101101/udacity-datastructures-algorithms | python | def _equation_checker(equation):
'\n \n '
return (equation.count('(') == equation.count(')')) |
@inject
def load(bento_tag: t.Union[(str, Tag)], tags: t.Optional[t.List[str]]=None, options: t.Optional['tf_ext.SaveOptions']=None, load_as_hub_module: t.Optional[bool]=None, model_store: 'ModelStore'=Provide[BentoMLContainer.model_store]) -> t.Union[('tf_ext.AutoTrackable', 'tf_ext.Module', 'HubModule', 'KerasLayer')]:
'\n Load a model from BentoML local modelstore with given name.\n\n Args:\n bento_tag (:code:`Union[str, Tag]`):\n Tag of a saved model in BentoML local modelstore.\n tags (:code:`str`, `optional`, defaults to `None`):\n A set of strings specifying the graph variant to use, if loading from a v1 module.\n options (:code:`tensorflow.saved_model.SaveOptions`, `optional`, default to :code:`None`):\n :code:`tensorflow.saved_model.LoadOptions` object that specifies options for loading. This\n argument can only be used from TensorFlow 2.3 onwards.\n load_as_hub_module (`bool`, `optional`, default to :code:`True`):\n Load the given weight that is saved from tfhub as either `hub.KerasLayer` or `hub.Module`.\n The latter only applies for TF1.\n model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):\n BentoML modelstore, provided by DI Container.\n\n Returns:\n :obj:`SavedModel`: an instance of :obj:`SavedModel` format from BentoML modelstore.\n\n Examples:\n\n .. code-block:: python\n\n import bentoml\n\n # load a model back into memory\n model = bentoml.tensorflow.load("my_tensorflow_model")\n\n '
model = model_store.get(bento_tag)
if (model.info.module not in (MODULE_NAME, __name__)):
raise BentoMLException(f'Model {bento_tag} was saved with module {model.info.module}, failed loading with {MODULE_NAME}.')
if model.info.context['import_from_tfhub']:
assert (load_as_hub_module is not None), 'You have to specified `load_as_hub_module=True | False` to load a `tensorflow_hub` module. If True is chosen, then BentoML will return either an instance of `hub.KerasLayer` or `hub.Module` depending on your TF version. For most usecase, we recommend to keep `load_as_hub_module=True`. If you wish to extend the functionalities of the given model, set `load_as_hub_module=False` will return a SavedModel object.'
if (hub is None):
raise MissingDependencyException(' `tensorflow_hub` does not exists.\n Make sure to `pip install --upgrade tensorflow_hub` before using.\n ')
module_path = model.path_of(model.info.options['local_path'])
if load_as_hub_module:
return (hub.Module(module_path) if get_tf_version().startswith('1') else hub.KerasLayer(module_path))
is_hub_module_v1: bool = tf.io.gfile.exists(native_module.get_module_proto_path(module_path))
if ((tags is None) and is_hub_module_v1):
tags = []
if (options is not None):
if (not LazyType('tensorflow.python.saved_model.save_options.SaveOptions').isinstance(options)):
raise BentoMLException(f'`options` has to be of type `tf.saved_model.SaveOptions`, got {type(options)} instead.')
if (not hasattr(getattr(tf, 'saved_model', None), 'LoadOptions')):
raise NotImplementedError(f'options are not supported for TF < 2.3.x, Current version: {get_tf_version()}')
tf_model: 'tf_ext.AutoTrackable' = tf.compat.v1.saved_model.load_v2(module_path, tags=tags, options=options)
else:
tf_model: 'tf_ext.AutoTrackable' = tf.compat.v1.saved_model.load_v2(module_path, tags=tags)
tf_model._is_hub_module_v1 = is_hub_module_v1
return tf_model
else:
tf_model: 'tf_ext.AutoTrackable' = tf.compat.v1.saved_model.load_v2(model.path)
return hook_loaded_model(tf_model, MODULE_NAME) | -5,209,461,294,918,116,000 | Load a model from BentoML local modelstore with given name.
Args:
bento_tag (:code:`Union[str, Tag]`):
Tag of a saved model in BentoML local modelstore.
tags (:code:`str`, `optional`, defaults to `None`):
A set of strings specifying the graph variant to use, if loading from a v1 module.
options (:code:`tensorflow.saved_model.SaveOptions`, `optional`, default to :code:`None`):
:code:`tensorflow.saved_model.LoadOptions` object that specifies options for loading. This
argument can only be used from TensorFlow 2.3 onwards.
load_as_hub_module (`bool`, `optional`, default to :code:`True`):
Load the given weight that is saved from tfhub as either `hub.KerasLayer` or `hub.Module`.
The latter only applies for TF1.
model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
BentoML modelstore, provided by DI Container.
Returns:
:obj:`SavedModel`: an instance of :obj:`SavedModel` format from BentoML modelstore.
Examples:
.. code-block:: python
import bentoml
# load a model back into memory
model = bentoml.tensorflow.load("my_tensorflow_model") | bentoml/_internal/frameworks/tensorflow_v2.py | load | almirb/BentoML | python | @inject
def load(bento_tag: t.Union[(str, Tag)], tags: t.Optional[t.List[str]]=None, options: t.Optional['tf_ext.SaveOptions']=None, load_as_hub_module: t.Optional[bool]=None, model_store: 'ModelStore'=Provide[BentoMLContainer.model_store]) -> t.Union[('tf_ext.AutoTrackable', 'tf_ext.Module', 'HubModule', 'KerasLayer')]:
'\n Load a model from BentoML local modelstore with given name.\n\n Args:\n bento_tag (:code:`Union[str, Tag]`):\n Tag of a saved model in BentoML local modelstore.\n tags (:code:`str`, `optional`, defaults to `None`):\n A set of strings specifying the graph variant to use, if loading from a v1 module.\n options (:code:`tensorflow.saved_model.SaveOptions`, `optional`, default to :code:`None`):\n :code:`tensorflow.saved_model.LoadOptions` object that specifies options for loading. This\n argument can only be used from TensorFlow 2.3 onwards.\n load_as_hub_module (`bool`, `optional`, default to :code:`True`):\n Load the given weight that is saved from tfhub as either `hub.KerasLayer` or `hub.Module`.\n The latter only applies for TF1.\n model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):\n BentoML modelstore, provided by DI Container.\n\n Returns:\n :obj:`SavedModel`: an instance of :obj:`SavedModel` format from BentoML modelstore.\n\n Examples:\n\n .. code-block:: python\n\n import bentoml\n\n # load a model back into memory\n model = bentoml.tensorflow.load("my_tensorflow_model")\n\n '
model = model_store.get(bento_tag)
if (model.info.module not in (MODULE_NAME, __name__)):
raise BentoMLException(f'Model {bento_tag} was saved with module {model.info.module}, failed loading with {MODULE_NAME}.')
if model.info.context['import_from_tfhub']:
assert (load_as_hub_module is not None), 'You have to specified `load_as_hub_module=True | False` to load a `tensorflow_hub` module. If True is chosen, then BentoML will return either an instance of `hub.KerasLayer` or `hub.Module` depending on your TF version. For most usecase, we recommend to keep `load_as_hub_module=True`. If you wish to extend the functionalities of the given model, set `load_as_hub_module=False` will return a SavedModel object.'
if (hub is None):
raise MissingDependencyException(' `tensorflow_hub` does not exists.\n Make sure to `pip install --upgrade tensorflow_hub` before using.\n ')
module_path = model.path_of(model.info.options['local_path'])
if load_as_hub_module:
return (hub.Module(module_path) if get_tf_version().startswith('1') else hub.KerasLayer(module_path))
is_hub_module_v1: bool = tf.io.gfile.exists(native_module.get_module_proto_path(module_path))
if ((tags is None) and is_hub_module_v1):
tags = []
if (options is not None):
if (not LazyType('tensorflow.python.saved_model.save_options.SaveOptions').isinstance(options)):
raise BentoMLException(f'`options` has to be of type `tf.saved_model.SaveOptions`, got {type(options)} instead.')
if (not hasattr(getattr(tf, 'saved_model', None), 'LoadOptions')):
raise NotImplementedError(f'options are not supported for TF < 2.3.x, Current version: {get_tf_version()}')
tf_model: 'tf_ext.AutoTrackable' = tf.compat.v1.saved_model.load_v2(module_path, tags=tags, options=options)
else:
tf_model: 'tf_ext.AutoTrackable' = tf.compat.v1.saved_model.load_v2(module_path, tags=tags)
tf_model._is_hub_module_v1 = is_hub_module_v1
return tf_model
else:
tf_model: 'tf_ext.AutoTrackable' = tf.compat.v1.saved_model.load_v2(model.path)
return hook_loaded_model(tf_model, MODULE_NAME) |
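
Based only on the signature and docstring above, a hedged usage sketch of the TF Hub branch: when the stored model was imported from Tensorflow Hub, load_as_hub_module must be passed explicitly, and True yields a hub.KerasLayer on TF2 (hub.Module on TF1) while False yields the underlying SavedModel object. The tag name here is made up for illustration.

import bentoml

TAG = "bert_en_uncased_preprocess"  # hypothetical tag of a model stored via import_from_tfhub

# hub.KerasLayer on TF2 / hub.Module on TF1, per the docstring above.
keras_layer = bentoml.tensorflow.load(TAG, load_as_hub_module=True)

# The raw SavedModel object instead, for extending the model's functionality.
saved_model = bentoml.tensorflow.load(TAG, load_as_hub_module=False)
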
@inject
def import_from_tfhub(identifier: t.Union[(str, 'HubModule', 'KerasLayer')], name: t.Optional[str]=None, labels: t.Optional[t.Dict[(str, str)]]=None, custom_objects: t.Optional[t.Dict[(str, t.Any)]]=None, metadata: t.Optional[t.Dict[(str, t.Any)]]=None, model_store: 'ModelStore'=Provide[BentoMLContainer.model_store]) -> Tag:
'\n Import a model from `Tensorflow Hub <https://tfhub.dev/>`_ to BentoML modelstore.\n\n Args:\n identifier (:code:`Union[str, tensorflow_hub.Module, tensorflow_hub.KerasLayer]`): Identifier accepts\n two type of inputs:\n - if `type` of :code:`identifier` either of type :code:`tensorflow_hub.Module` (**legacy** `tensorflow_hub`) or :code:`tensorflow_hub.KerasLayer` (`tensorflow_hub`), then we will save the given model to a :code:`SavedModel` format.\n - if `type` of :code:`identifier` is a :obj:`str`, we assume that this is the URI retrieved from Tensorflow Hub. We then clean the given URI, and get a local copy of a given model to BentoML modelstore. name (:code:`str`, `optional`, defaults to `None`): An optional name for the model. If :code:`identifier` is a :obj:`str`, then name can be autogenerated from the given URI.\n name (:code:`str`, `optional`, default to `None`):\n Optional name for the saved model. If None, then name will be generated from :code:`identifier`.\n labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):\n user-defined labels for managing models, e.g. team=nlp, stage=dev\n custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`):\n user-defined additional python objects to be saved alongside the model,\n e.g. a tokenizer instance, preprocessor function, model configuration json\n metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):\n Custom metadata for given model.\n model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):\n BentoML modelstore, provided by DI Container.\n\n Returns:\n :obj:`~bentoml.Tag`: A :obj:`~bentoml.Tag` object that can be used to retrieve the model with :func:`bentoml.tensorflow.load`:\n\n Example for importing a model from Tensorflow Hub:\n\n .. code-block:: python\n\n import tensorflow_text as text # noqa # pylint: disable\n import bentoml\n\n tag = bentoml.tensorflow.import_from_tfhub("https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3")\n\n # load model back with `load`:\n model = bentoml.tensorflow.load(tag, load_as_hub_module=True)\n\n\n Example for importing a custom Tensorflow Hub model:\n\n .. code-block:: python\n\n import tensorflow as tf\n import tensorflow_hub as hub\n import bentoml\n\n def _plus_one_model_tf2():\n obj = tf.train.Checkpoint()\n\n @tf.function(input_signature=[tf.TensorSpec(None, dtype=tf.float32)])\n def plus_one(x):\n return x + 1\n\n obj.__call__ = plus_one\n return obj\n\n # then save the given model to BentoML modelstore:\n model = _plus_one_model_tf2()\n tag = bentoml.tensorflow.import_from_tfhub(model)\n '
if (hub is None):
raise MissingDependencyException(' `tensorflow_hub` does not exists.\n Make sure to `pip install --upgrade tensorflow_hub` before using.\n ')
context: t.Dict[(str, t.Any)] = {'framework_name': 'tensorflow', 'pip_dependencies': [f'tensorflow=={get_tf_version()}', f"tensorflow_hub=={importlib_metadata.version('tensorflow_hub')}"], 'import_from_tfhub': True}
if (name is None):
if isinstance(identifier, str):
name = _clean_name(identifier)
else:
name = f'{identifier.__class__.__name__}_{uuid.uuid4().hex[:5].upper()}'
with bentoml.models.create(name, module=MODULE_NAME, options=None, context=context, metadata=metadata, labels=labels, custom_objects=custom_objects) as _model:
if isinstance(identifier, str):
current_cache_dir = os.environ.get('TFHUB_CACHE_DIR')
os.environ['TFHUB_CACHE_DIR'] = _model.path
fpath: str = resolve(identifier)
folder = fpath.split('/')[(- 1)]
_model.info.options = {'model': identifier, 'local_path': folder}
if (current_cache_dir is not None):
os.environ['TFHUB_CACHE_DIR'] = current_cache_dir
else:
if hasattr(identifier, 'export'):
with tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph()) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
identifier.export(_model.path, sess)
else:
tf.saved_model.save(identifier, _model.path)
_model.info.options = {'model': identifier.__class__.__name__, 'local_path': '.'}
return _model.tag | 5,087,025,802,375,021,000 | Import a model from `Tensorflow Hub <https://tfhub.dev/>`_ to BentoML modelstore.
Args:
identifier (:code:`Union[str, tensorflow_hub.Module, tensorflow_hub.KerasLayer]`): Identifier accepts
two type of inputs:
- if `type` of :code:`identifier` either of type :code:`tensorflow_hub.Module` (**legacy** `tensorflow_hub`) or :code:`tensorflow_hub.KerasLayer` (`tensorflow_hub`), then we will save the given model to a :code:`SavedModel` format.
- if `type` of :code:`identifier` is a :obj:`str`, we assume that this is the URI retrieved from Tensorflow Hub. We then clean the given URI, and get a local copy of a given model to BentoML modelstore. name (:code:`str`, `optional`, defaults to `None`): An optional name for the model. If :code:`identifier` is a :obj:`str`, then name can be autogenerated from the given URI.
name (:code:`str`, `optional`, default to `None`):
Optional name for the saved model. If None, then name will be generated from :code:`identifier`.
labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):
user-defined labels for managing models, e.g. team=nlp, stage=dev
custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`):
user-defined additional python objects to be saved alongside the model,
e.g. a tokenizer instance, preprocessor function, model configuration json
metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
Custom metadata for given model.
model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
BentoML modelstore, provided by DI Container.
Returns:
:obj:`~bentoml.Tag`: A :obj:`~bentoml.Tag` object that can be used to retrieve the model with :func:`bentoml.tensorflow.load`:
Example for importing a model from Tensorflow Hub:
.. code-block:: python
import tensorflow_text as text # noqa # pylint: disable
import bentoml
tag = bentoml.tensorflow.import_from_tfhub("https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3")
# load model back with `load`:
model = bentoml.tensorflow.load(tag, load_as_hub_module=True)
Example for importing a custom Tensorflow Hub model:
.. code-block:: python
import tensorflow as tf
import tensorflow_hub as hub
import bentoml
def _plus_one_model_tf2():
obj = tf.train.Checkpoint()
@tf.function(input_signature=[tf.TensorSpec(None, dtype=tf.float32)])
def plus_one(x):
return x + 1
obj.__call__ = plus_one
return obj
# then save the given model to BentoML modelstore:
model = _plus_one_model_tf2()
tag = bentoml.tensorflow.import_from_tfhub(model) | bentoml/_internal/frameworks/tensorflow_v2.py | import_from_tfhub | almirb/BentoML | python | @inject
def import_from_tfhub(identifier: t.Union[(str, 'HubModule', 'KerasLayer')], name: t.Optional[str]=None, labels: t.Optional[t.Dict[(str, str)]]=None, custom_objects: t.Optional[t.Dict[(str, t.Any)]]=None, metadata: t.Optional[t.Dict[(str, t.Any)]]=None, model_store: 'ModelStore'=Provide[BentoMLContainer.model_store]) -> Tag:
'\n Import a model from `Tensorflow Hub <https://tfhub.dev/>`_ to BentoML modelstore.\n\n Args:\n identifier (:code:`Union[str, tensorflow_hub.Module, tensorflow_hub.KerasLayer]`): Identifier accepts\n two type of inputs:\n - if `type` of :code:`identifier` either of type :code:`tensorflow_hub.Module` (**legacy** `tensorflow_hub`) or :code:`tensorflow_hub.KerasLayer` (`tensorflow_hub`), then we will save the given model to a :code:`SavedModel` format.\n - if `type` of :code:`identifier` is a :obj:`str`, we assume that this is the URI retrieved from Tensorflow Hub. We then clean the given URI, and get a local copy of a given model to BentoML modelstore. name (:code:`str`, `optional`, defaults to `None`): An optional name for the model. If :code:`identifier` is a :obj:`str`, then name can be autogenerated from the given URI.\n name (:code:`str`, `optional`, default to `None`):\n Optional name for the saved model. If None, then name will be generated from :code:`identifier`.\n labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):\n user-defined labels for managing models, e.g. team=nlp, stage=dev\n custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`):\n user-defined additional python objects to be saved alongside the model,\n e.g. a tokenizer instance, preprocessor function, model configuration json\n metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):\n Custom metadata for given model.\n model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):\n BentoML modelstore, provided by DI Container.\n\n Returns:\n :obj:`~bentoml.Tag`: A :obj:`~bentoml.Tag` object that can be used to retrieve the model with :func:`bentoml.tensorflow.load`:\n\n Example for importing a model from Tensorflow Hub:\n\n .. code-block:: python\n\n import tensorflow_text as text # noqa # pylint: disable\n import bentoml\n\n tag = bentoml.tensorflow.import_from_tfhub("https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3")\n\n # load model back with `load`:\n model = bentoml.tensorflow.load(tag, load_as_hub_module=True)\n\n\n Example for importing a custom Tensorflow Hub model:\n\n .. code-block:: python\n\n import tensorflow as tf\n import tensorflow_hub as hub\n import bentoml\n\n def _plus_one_model_tf2():\n obj = tf.train.Checkpoint()\n\n @tf.function(input_signature=[tf.TensorSpec(None, dtype=tf.float32)])\n def plus_one(x):\n return x + 1\n\n obj.__call__ = plus_one\n return obj\n\n # then save the given model to BentoML modelstore:\n model = _plus_one_model_tf2()\n tag = bentoml.tensorflow.import_from_tfhub(model)\n '
if (hub is None):
raise MissingDependencyException(' `tensorflow_hub` does not exists.\n Make sure to `pip install --upgrade tensorflow_hub` before using.\n ')
context: t.Dict[(str, t.Any)] = {'framework_name': 'tensorflow', 'pip_dependencies': [f'tensorflow=={get_tf_version()}', f"tensorflow_hub=={importlib_metadata.version('tensorflow_hub')}"], 'import_from_tfhub': True}
if (name is None):
if isinstance(identifier, str):
name = _clean_name(identifier)
else:
name = f'{identifier.__class__.__name__}_{uuid.uuid4().hex[:5].upper()}'
with bentoml.models.create(name, module=MODULE_NAME, options=None, context=context, metadata=metadata, labels=labels, custom_objects=custom_objects) as _model:
if isinstance(identifier, str):
current_cache_dir = os.environ.get('TFHUB_CACHE_DIR')
os.environ['TFHUB_CACHE_DIR'] = _model.path
fpath: str = resolve(identifier)
folder = fpath.split('/')[(- 1)]
_model.info.options = {'model': identifier, 'local_path': folder}
if (current_cache_dir is not None):
os.environ['TFHUB_CACHE_DIR'] = current_cache_dir
else:
if hasattr(identifier, 'export'):
with tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph()) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
identifier.export(_model.path, sess)
else:
tf.saved_model.save(identifier, _model.path)
_model.info.options = {'model': identifier.__class__.__name__, 'local_path': '.'}
return _model.tag |
@inject
def save(name: str, model: t.Union[('PathType', 'tf_ext.KerasModel', 'tf_ext.Module')], *, signatures: t.Optional['tf_ext.ConcreteFunction']=None, options: t.Optional['tf_ext.SaveOptions']=None, labels: t.Optional[t.Dict[(str, str)]]=None, custom_objects: t.Optional[t.Dict[(str, t.Any)]]=None, metadata: t.Optional[t.Dict[(str, t.Any)]]=None, model_store: 'ModelStore'=Provide[BentoMLContainer.model_store]) -> Tag:
'\n Save a model instance to BentoML modelstore.\n\n Args:\n name (:code:`str`):\n Name for given model instance. This should pass Python identifier check.\n model (:code:`Union[keras.Model, tf.Module, path-like objects]`):\n Instance of model to be saved\n labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):\n user-defined labels for managing models, e.g. team=nlp, stage=dev\n custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`):\n user-defined additional python objects to be saved alongside the model,\n e.g. a tokenizer instance, preprocessor function, model configuration json\n metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):\n Custom metadata for given model.\n model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):\n BentoML modelstore, provided by DI Container.\n signatures (:code:`Union[Callable[..., Any], dict]`, `optional`, default to :code:`None`):\n Refers to `Signatures explanation <https://www.tensorflow.org/api_docs/python/tf/saved_model/save>`_\n from Tensorflow documentation for more information.\n options (`tf.saved_model.SaveOptions`, `optional`, default to :code:`None`):\n :obj:`tf.saved_model.SaveOptions` object that specifies options for saving.\n\n Raises:\n ValueError: If :obj:`obj` is not trackable.\n\n Returns:\n :obj:`~bentoml.Tag`: A :obj:`tag` with a format `name:version` where `name` is the user-defined model\'s name, and a generated `version` by BentoML.\n\n Examples:\n\n .. code-block:: python\n\n import tensorflow as tf\n import numpy as np\n import bentoml\n\n class NativeModel(tf.Module):\n def __init__(self):\n super().__init__()\n self.weights = np.asfarray([[1.0], [1.0], [1.0], [1.0], [1.0]])\n self.dense = lambda inputs: tf.matmul(inputs, self.weights)\n\n @tf.function(\n input_signature=[tf.TensorSpec(shape=[1, 5], dtype=tf.float64, name="inputs")]\n )\n def __call__(self, inputs):\n return self.dense(inputs)\n\n # then save the given model to BentoML modelstore:\n model = NativeModel()\n tag = bentoml.tensorflow.save("native_toy", model)\n\n .. note::\n\n :code:`bentoml.tensorflow.save` API also support saving `RaggedTensor <https://www.tensorflow.org/guide/ragged_tensor>`_ model and Keras model. If you choose to save a Keras model\n with :code:`bentoml.tensorflow.save`, then the model will be saved under a :obj:`SavedModel` format instead of :obj:`.h5`.\n\n '
context: t.Dict[(str, t.Any)] = {'framework_name': 'tensorflow', 'pip_dependencies': [f'tensorflow=={get_tf_version()}'], 'import_from_tfhub': False}
with bentoml.models.create(name, module=MODULE_NAME, options=None, context=context, labels=labels, custom_objects=custom_objects, metadata=metadata) as _model:
if isinstance(model, (str, bytes, os.PathLike, pathlib.Path)):
assert os.path.isdir(model)
copy_tree(str(model), _model.path)
else:
if options:
logger.warning(f"Parameter 'options: {str(options)}' is ignored when using tensorflow {get_tf_version()}")
tf.saved_model.save(model, _model.path, signatures=signatures, options=options)
return _model.tag | -6,741,630,064,561,893,000 | Save a model instance to BentoML modelstore.
Args:
name (:code:`str`):
Name for given model instance. This should pass Python identifier check.
model (:code:`Union[keras.Model, tf.Module, path-like objects]`):
Instance of model to be saved
labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):
user-defined labels for managing models, e.g. team=nlp, stage=dev
custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`):
user-defined additional python objects to be saved alongside the model,
e.g. a tokenizer instance, preprocessor function, model configuration json
metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
Custom metadata for given model.
model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
BentoML modelstore, provided by DI Container.
signatures (:code:`Union[Callable[..., Any], dict]`, `optional`, default to :code:`None`):
Refers to `Signatures explanation <https://www.tensorflow.org/api_docs/python/tf/saved_model/save>`_
from Tensorflow documentation for more information.
options (`tf.saved_model.SaveOptions`, `optional`, default to :code:`None`):
:obj:`tf.saved_model.SaveOptions` object that specifies options for saving.
Raises:
ValueError: If :obj:`obj` is not trackable.
Returns:
:obj:`~bentoml.Tag`: A :obj:`tag` with a format `name:version` where `name` is the user-defined model's name, and a generated `version` by BentoML.
Examples:
.. code-block:: python
import tensorflow as tf
import numpy as np
import bentoml
class NativeModel(tf.Module):
def __init__(self):
super().__init__()
self.weights = np.asfarray([[1.0], [1.0], [1.0], [1.0], [1.0]])
self.dense = lambda inputs: tf.matmul(inputs, self.weights)
@tf.function(
input_signature=[tf.TensorSpec(shape=[1, 5], dtype=tf.float64, name="inputs")]
)
def __call__(self, inputs):
return self.dense(inputs)
# then save the given model to BentoML modelstore:
model = NativeModel()
tag = bentoml.tensorflow.save("native_toy", model)
.. note::
:code:`bentoml.tensorflow.save` API also support saving `RaggedTensor <https://www.tensorflow.org/guide/ragged_tensor>`_ model and Keras model. If you choose to save a Keras model
with :code:`bentoml.tensorflow.save`, then the model will be saved under a :obj:`SavedModel` format instead of :obj:`.h5`. | bentoml/_internal/frameworks/tensorflow_v2.py | save | almirb/BentoML | python | @inject
def save(name: str, model: t.Union[('PathType', 'tf_ext.KerasModel', 'tf_ext.Module')], *, signatures: t.Optional['tf_ext.ConcreteFunction']=None, options: t.Optional['tf_ext.SaveOptions']=None, labels: t.Optional[t.Dict[(str, str)]]=None, custom_objects: t.Optional[t.Dict[(str, t.Any)]]=None, metadata: t.Optional[t.Dict[(str, t.Any)]]=None, model_store: 'ModelStore'=Provide[BentoMLContainer.model_store]) -> Tag:
'\n Save a model instance to BentoML modelstore.\n\n Args:\n name (:code:`str`):\n Name for given model instance. This should pass Python identifier check.\n model (:code:`Union[keras.Model, tf.Module, path-like objects]`):\n Instance of model to be saved\n labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):\n user-defined labels for managing models, e.g. team=nlp, stage=dev\n custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`):\n user-defined additional python objects to be saved alongside the model,\n e.g. a tokenizer instance, preprocessor function, model configuration json\n metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):\n Custom metadata for given model.\n model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):\n BentoML modelstore, provided by DI Container.\n signatures (:code:`Union[Callable[..., Any], dict]`, `optional`, default to :code:`None`):\n Refers to `Signatures explanation <https://www.tensorflow.org/api_docs/python/tf/saved_model/save>`_\n from Tensorflow documentation for more information.\n options (`tf.saved_model.SaveOptions`, `optional`, default to :code:`None`):\n :obj:`tf.saved_model.SaveOptions` object that specifies options for saving.\n\n Raises:\n ValueError: If :obj:`obj` is not trackable.\n\n Returns:\n :obj:`~bentoml.Tag`: A :obj:`tag` with a format `name:version` where `name` is the user-defined model\'s name, and a generated `version` by BentoML.\n\n Examples:\n\n .. code-block:: python\n\n import tensorflow as tf\n import numpy as np\n import bentoml\n\n class NativeModel(tf.Module):\n def __init__(self):\n super().__init__()\n self.weights = np.asfarray([[1.0], [1.0], [1.0], [1.0], [1.0]])\n self.dense = lambda inputs: tf.matmul(inputs, self.weights)\n\n @tf.function(\n input_signature=[tf.TensorSpec(shape=[1, 5], dtype=tf.float64, name="inputs")]\n )\n def __call__(self, inputs):\n return self.dense(inputs)\n\n # then save the given model to BentoML modelstore:\n model = NativeModel()\n tag = bentoml.tensorflow.save("native_toy", model)\n\n .. note::\n\n :code:`bentoml.tensorflow.save` API also support saving `RaggedTensor <https://www.tensorflow.org/guide/ragged_tensor>`_ model and Keras model. If you choose to save a Keras model\n with :code:`bentoml.tensorflow.save`, then the model will be saved under a :obj:`SavedModel` format instead of :obj:`.h5`.\n\n '
context: t.Dict[(str, t.Any)] = {'framework_name': 'tensorflow', 'pip_dependencies': [f'tensorflow=={get_tf_version()}'], 'import_from_tfhub': False}
with bentoml.models.create(name, module=MODULE_NAME, options=None, context=context, labels=labels, custom_objects=custom_objects, metadata=metadata) as _model:
if isinstance(model, (str, bytes, os.PathLike, pathlib.Path)):
assert os.path.isdir(model)
copy_tree(str(model), _model.path)
else:
if options:
logger.warning(f"Parameter 'options: {str(options)}' is ignored when using tensorflow {get_tf_version()}")
tf.saved_model.save(model, _model.path, signatures=signatures, options=options)
return _model.tag |
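
The save/load pair round-trips any trackable tf.Module (or Keras model) through the BentoML model store as a SavedModel. A condensed sketch of that round trip, following the docstring's own NativeModel pattern; the module and tag name are illustrative only.

import tensorflow as tf
import bentoml

class PlusOne(tf.Module):
    # Minimal trackable module with a fixed input signature, mirroring the
    # docstring's NativeModel example.
    @tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.float32)])
    def __call__(self, x):
        return x + 1.0

tag = bentoml.tensorflow.save("plus_one_demo", PlusOne())  # stored as a SavedModel
restored = bentoml.tensorflow.load(tag)                    # AutoTrackable, not a PlusOne
print(restored(tf.constant([1.0, 2.0])))                   # -> [2.0, 3.0]
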
def load_runner(tag: t.Union[(str, Tag)], *, predict_fn_name: str='__call__', device_id: str='CPU:0', name: t.Optional[str]=None, partial_kwargs: t.Optional[t.Dict[(str, t.Any)]]=None) -> '_TensorflowRunner':
'\n Runner represents a unit of serving logic that can be scaled horizontally to\n maximize throughput. `bentoml.tensorflow.load_runner` implements a Runner class that\n wrap around a Tensorflow model, which optimize it for the BentoML runtime.\n\n Args:\n tag (:code:`Union[str, Tag]`):\n Tag of a saved model in BentoML local modelstore.\n predict_fn_name (:code:`str`, default to :code:`__call__`):\n Inference function to be used.\n partial_kwargs (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):\n Dictionary of partial kwargs that can be shared across different model.\n device_id (:code:`str`, `optional`, default to the first CPU):\n Optional devices to put the given model on. Refers to `Logical Devices <https://www.tensorflow.org/api_docs/python/tf/config/list_logical_devices>`_ from TF documentation.\n\n Returns:\n :obj:`~bentoml._internal.runner.Runner`: Runner instances for :mod:`bentoml.tensorflow` model\n\n Examples:\n\n .. code-block:: python\n\n import bentoml\n\n # load a runner from a given flag\n runner = bentoml.tensorflow.load_runner(tag)\n\n # load a runner on GPU:0\n runner = bentoml.tensorflow.load_runner(tag, resource_quota=dict(gpus=0), device_id="GPU:0")\n\n '
return _TensorflowRunner(tag=tag, predict_fn_name=predict_fn_name, device_id=device_id, partial_kwargs=partial_kwargs, name=name) | -1,421,393,638,133,696,500 | Runner represents a unit of serving logic that can be scaled horizontally to
maximize throughput. `bentoml.tensorflow.load_runner` implements a Runner class that
wrap around a Tensorflow model, which optimize it for the BentoML runtime.
Args:
tag (:code:`Union[str, Tag]`):
Tag of a saved model in BentoML local modelstore.
predict_fn_name (:code:`str`, default to :code:`__call__`):
Inference function to be used.
partial_kwargs (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
Dictionary of partial kwargs that can be shared across different model.
device_id (:code:`str`, `optional`, default to the first CPU):
Optional devices to put the given model on. Refers to `Logical Devices <https://www.tensorflow.org/api_docs/python/tf/config/list_logical_devices>`_ from TF documentation.
Returns:
:obj:`~bentoml._internal.runner.Runner`: Runner instances for :mod:`bentoml.tensorflow` model
Examples:
.. code-block:: python
import bentoml
# load a runner from a given flag
runner = bentoml.tensorflow.load_runner(tag)
# load a runner on GPU:0
runner = bentoml.tensorflow.load_runner(tag, resource_quota=dict(gpus=0), device_id="GPU:0") | bentoml/_internal/frameworks/tensorflow_v2.py | load_runner | almirb/BentoML | python | def load_runner(tag: t.Union[(str, Tag)], *, predict_fn_name: str='__call__', device_id: str='CPU:0', name: t.Optional[str]=None, partial_kwargs: t.Optional[t.Dict[(str, t.Any)]]=None) -> '_TensorflowRunner':
'\n Runner represents a unit of serving logic that can be scaled horizontally to\n maximize throughput. `bentoml.tensorflow.load_runner` implements a Runner class that\n wrap around a Tensorflow model, which optimize it for the BentoML runtime.\n\n Args:\n tag (:code:`Union[str, Tag]`):\n Tag of a saved model in BentoML local modelstore.\n predict_fn_name (:code:`str`, default to :code:`__call__`):\n Inference function to be used.\n partial_kwargs (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):\n Dictionary of partial kwargs that can be shared across different model.\n device_id (:code:`str`, `optional`, default to the first CPU):\n Optional devices to put the given model on. Refers to `Logical Devices <https://www.tensorflow.org/api_docs/python/tf/config/list_logical_devices>`_ from TF documentation.\n\n Returns:\n :obj:`~bentoml._internal.runner.Runner`: Runner instances for :mod:`bentoml.tensorflow` model\n\n Examples:\n\n .. code-block:: python\n\n import bentoml\n\n # load a runner from a given flag\n runner = bentoml.tensorflow.load_runner(tag)\n\n # load a runner on GPU:0\n runner = bentoml.tensorflow.load_runner(tag, resource_quota=dict(gpus=0), device_id="GPU:0")\n\n '
return _TensorflowRunner(tag=tag, predict_fn_name=predict_fn_name, device_id=device_id, partial_kwargs=partial_kwargs, name=name) |
def test_no_defaults(self, Manifest):
'\n LibreOffice does not use the Default element\n '
xml = '\n <Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types">\n <Override PartName="/_rels/.rels" ContentType="application/vnd.openxmlformats-package.relationships+xml"/>\n </Types>\n '
node = fromstring(xml)
manifest = Manifest.from_tree(node)
exts = manifest.extensions
assert (exts == []) | -4,868,845,727,821,458,000 | LibreOffice does not use the Default element | openpyxl/packaging/tests/test_manifest.py | test_no_defaults | chenc2/openpyxl | python | def test_no_defaults(self, Manifest):
'\n \n '
xml = '\n <Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types">\n <Override PartName="/_rels/.rels" ContentType="application/vnd.openxmlformats-package.relationships+xml"/>\n </Types>\n '
node = fromstring(xml)
manifest = Manifest.from_tree(node)
exts = manifest.extensions
assert (exts == []) |
def my_id(self) -> bytes32:
'If node has public cert use that one for id, if not use private.'
if (self.p2p_crt_path is not None):
pem_cert = x509.load_pem_x509_certificate(self.p2p_crt_path.read_bytes(), default_backend())
else:
pem_cert = x509.load_pem_x509_certificate(self._private_cert_path.read_bytes(), default_backend())
der_cert_bytes = pem_cert.public_bytes(encoding=serialization.Encoding.DER)
der_cert = x509.load_der_x509_certificate(der_cert_bytes, default_backend())
return bytes32(der_cert.fingerprint(hashes.SHA256())) | -4,359,501,893,485,975,000 | If node has public cert use that one for id, if not use private. | greenberry/server/server.py | my_id | GreenBerry-Network/greenberry-blockchain | python | def my_id(self) -> bytes32:
if (self.p2p_crt_path is not None):
pem_cert = x509.load_pem_x509_certificate(self.p2p_crt_path.read_bytes(), default_backend())
else:
pem_cert = x509.load_pem_x509_certificate(self._private_cert_path.read_bytes(), default_backend())
der_cert_bytes = pem_cert.public_bytes(encoding=serialization.Encoding.DER)
der_cert = x509.load_der_x509_certificate(der_cert_bytes, default_backend())
return bytes32(der_cert.fingerprint(hashes.SHA256())) |
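
The node ID above is simply the SHA-256 fingerprint of the node's X.509 certificate (the public P2P cert if present, otherwise the private one). The same fingerprint can be computed from any PEM certificate with the cryptography package; the file path below is a hypothetical example.

from pathlib import Path

from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization

def cert_fingerprint(pem_path: Path) -> bytes:
    # 32-byte SHA-256 fingerprint of a PEM certificate, normalised via DER as above.
    pem_cert = x509.load_pem_x509_certificate(pem_path.read_bytes(), default_backend())
    der_bytes = pem_cert.public_bytes(serialization.Encoding.DER)
    der_cert = x509.load_der_x509_certificate(der_bytes, default_backend())
    return der_cert.fingerprint(hashes.SHA256())

# node_id = cert_fingerprint(Path("ssl/public_node.crt"))  # hypothetical path
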
async def garbage_collect_connections_task(self) -> None:
'\n Periodically checks for connections with no activity (have not sent us any data), and removes them,\n to allow room for other peers.\n '
while True:
(await asyncio.sleep(600))
to_remove: List[WSGreenBerryConnection] = []
for connection in self.all_connections.values():
if ((self._local_type == NodeType.FULL_NODE) and (connection.connection_type == NodeType.FULL_NODE)):
if ((time.time() - connection.last_message_time) > 1800):
to_remove.append(connection)
for connection in to_remove:
self.log.debug(f'Garbage collecting connection {connection.peer_host} due to inactivity')
(await connection.close())
to_remove_ban = []
for (peer_ip, ban_until_time) in self.banned_peers.items():
if (time.time() > ban_until_time):
to_remove_ban.append(peer_ip)
for peer_ip in to_remove_ban:
del self.banned_peers[peer_ip] | 692,973,664,444,766,700 | Periodically checks for connections with no activity (have not sent us any data), and removes them,
to allow room for other peers. | greenberry/server/server.py | garbage_collect_connections_task | GreenBerry-Network/greenberry-blockchain | python | async def garbage_collect_connections_task(self) -> None:
'\n Periodically checks for connections with no activity (have not sent us any data), and removes them,\n to allow room for other peers.\n '
while True:
(await asyncio.sleep(600))
to_remove: List[WSGreenBerryConnection] = []
for connection in self.all_connections.values():
if ((self._local_type == NodeType.FULL_NODE) and (connection.connection_type == NodeType.FULL_NODE)):
if ((time.time() - connection.last_message_time) > 1800):
to_remove.append(connection)
for connection in to_remove:
self.log.debug(f'Garbage collecting connection {connection.peer_host} due to inactivity')
(await connection.close())
to_remove_ban = []
for (peer_ip, ban_until_time) in self.banned_peers.items():
if (time.time() > ban_until_time):
to_remove_ban.append(peer_ip)
for peer_ip in to_remove_ban:
del self.banned_peers[peer_ip] |
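
The garbage-collection coroutine is a plain asyncio background task: sleep, collect stale entries, close them, repeat. A generic sketch of the same pattern, stripped of the server specifics but keeping the 600-second interval and 1800-second idle cutoff from the code above; it assumes each connection object exposes last_message_time and an async close().

import asyncio
import time

async def reap_idle(connections: dict, idle_cutoff: float = 1800, interval: float = 600):
    # Periodically close entries whose last_message_time is older than idle_cutoff.
    while True:
        await asyncio.sleep(interval)
        now = time.time()
        stale = [key for key, conn in connections.items()
                 if now - conn.last_message_time > idle_cutoff]
        for key in stale:
            await connections[key].close()

# Started once at server startup, e.g.:
# asyncio.create_task(reap_idle(server.all_connections))
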
async def start_client(self, target_node: PeerInfo, on_connect: Callable=None, auth: bool=False, is_feeler: bool=False) -> bool:
'\n Tries to connect to the target node, adding one connection into the pipeline, if successful.\n An on connect method can also be specified, and this will be saved into the instance variables.\n '
if self.is_duplicate_or_self_connection(target_node):
return False
if ((target_node.host in self.banned_peers) and (time.time() < self.banned_peers[target_node.host])):
self.log.warning(f'Peer {target_node.host} is still banned, not connecting to it')
return False
if auth:
ssl_context = ssl_context_for_client(self.ca_private_crt_path, self.ca_private_key_path, self._private_cert_path, self._private_key_path)
else:
ssl_context = ssl_context_for_client(self.greenberry_ca_crt_path, self.greenberry_ca_key_path, self.p2p_crt_path, self.p2p_key_path)
session = None
connection: Optional[WSGreenBerryConnection] = None
try:
timeout = ClientTimeout(total=30)
session = ClientSession(timeout=timeout)
try:
if (type(ip_address(target_node.host)) is IPv6Address):
target_node = PeerInfo(f'[{target_node.host}]', target_node.port)
except ValueError:
pass
url = f'wss://{target_node.host}:{target_node.port}/ws'
self.log.debug(f'Connecting: {url}, Peer info: {target_node}')
try:
ws = (await session.ws_connect(url, autoclose=True, autoping=True, heartbeat=60, ssl=ssl_context, max_msg_size=((50 * 1024) * 1024)))
except ServerDisconnectedError:
self.log.debug(f'Server disconnected error connecting to {url}. Perhaps we are banned by the peer.')
(await session.close())
return False
except asyncio.TimeoutError:
self.log.debug(f'Timeout error connecting to {url}')
(await session.close())
return False
if (ws is not None):
assert ((ws._response.connection is not None) and (ws._response.connection.transport is not None))
transport = ws._response.connection.transport
cert_bytes = transport._ssl_protocol._extra['ssl_object'].getpeercert(True)
der_cert = x509.load_der_x509_certificate(cert_bytes, default_backend())
peer_id = bytes32(der_cert.fingerprint(hashes.SHA256()))
if (peer_id == self.node_id):
raise RuntimeError(f'Trying to connect to a peer ({target_node}) with the same peer_id: {peer_id}')
connection = WSGreenBerryConnection(self._local_type, ws, self._port, self.log, True, False, target_node.host, self.incoming_messages, self.connection_closed, peer_id, self._inbound_rate_limit_percent, self._outbound_rate_limit_percent, session=session)
handshake = (await connection.perform_handshake(self._network_id, protocol_version, self._port, self._local_type))
assert (handshake is True)
(await self.connection_added(connection, on_connect))
connection_type_str = ''
if (connection.connection_type is not None):
connection_type_str = connection.connection_type.name.lower()
self.log.info(f'Connected with {connection_type_str} {target_node}')
if is_feeler:
asyncio.create_task(connection.close())
return True
else:
(await session.close())
return False
except client_exceptions.ClientConnectorError as e:
self.log.info(f'{e}')
except ProtocolError as e:
if (connection is not None):
(await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, e.code))
if (e.code == Err.INVALID_HANDSHAKE):
self.log.warning(f'Invalid handshake with peer {target_node}. Maybe the peer is running old software.')
elif (e.code == Err.INCOMPATIBLE_NETWORK_ID):
self.log.warning('Incompatible network ID. Maybe the peer is on another network')
elif (e.code == Err.SELF_CONNECTION):
pass
else:
error_stack = traceback.format_exc()
self.log.error(f'Exception {e}, exception Stack: {error_stack}')
except Exception as e:
if (connection is not None):
(await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, Err.UNKNOWN))
error_stack = traceback.format_exc()
self.log.error(f'Exception {e}, exception Stack: {error_stack}')
if (session is not None):
(await session.close())
return False | 1,556,936,749,418,891,500 | Tries to connect to the target node, adding one connection into the pipeline, if successful.
An on connect method can also be specified, and this will be saved into the instance variables. | greenberry/server/server.py | start_client | GreenBerry-Network/greenberry-blockchain | python | async def start_client(self, target_node: PeerInfo, on_connect: Callable=None, auth: bool=False, is_feeler: bool=False) -> bool:
'\n Tries to connect to the target node, adding one connection into the pipeline, if successful.\n An on connect method can also be specified, and this will be saved into the instance variables.\n '
if self.is_duplicate_or_self_connection(target_node):
return False
if ((target_node.host in self.banned_peers) and (time.time() < self.banned_peers[target_node.host])):
self.log.warning(f'Peer {target_node.host} is still banned, not connecting to it')
return False
if auth:
ssl_context = ssl_context_for_client(self.ca_private_crt_path, self.ca_private_key_path, self._private_cert_path, self._private_key_path)
else:
ssl_context = ssl_context_for_client(self.greenberry_ca_crt_path, self.greenberry_ca_key_path, self.p2p_crt_path, self.p2p_key_path)
session = None
connection: Optional[WSGreenBerryConnection] = None
try:
timeout = ClientTimeout(total=30)
session = ClientSession(timeout=timeout)
try:
if (type(ip_address(target_node.host)) is IPv6Address):
target_node = PeerInfo(f'[{target_node.host}]', target_node.port)
except ValueError:
pass
url = f'wss://{target_node.host}:{target_node.port}/ws'
self.log.debug(f'Connecting: {url}, Peer info: {target_node}')
try:
ws = (await session.ws_connect(url, autoclose=True, autoping=True, heartbeat=60, ssl=ssl_context, max_msg_size=((50 * 1024) * 1024)))
except ServerDisconnectedError:
self.log.debug(f'Server disconnected error connecting to {url}. Perhaps we are banned by the peer.')
(await session.close())
return False
except asyncio.TimeoutError:
self.log.debug(f'Timeout error connecting to {url}')
(await session.close())
return False
if (ws is not None):
assert ((ws._response.connection is not None) and (ws._response.connection.transport is not None))
transport = ws._response.connection.transport
cert_bytes = transport._ssl_protocol._extra['ssl_object'].getpeercert(True)
der_cert = x509.load_der_x509_certificate(cert_bytes, default_backend())
peer_id = bytes32(der_cert.fingerprint(hashes.SHA256()))
if (peer_id == self.node_id):
raise RuntimeError(f'Trying to connect to a peer ({target_node}) with the same peer_id: {peer_id}')
connection = WSGreenBerryConnection(self._local_type, ws, self._port, self.log, True, False, target_node.host, self.incoming_messages, self.connection_closed, peer_id, self._inbound_rate_limit_percent, self._outbound_rate_limit_percent, session=session)
handshake = (await connection.perform_handshake(self._network_id, protocol_version, self._port, self._local_type))
assert (handshake is True)
(await self.connection_added(connection, on_connect))
connection_type_str = ''
if (connection.connection_type is not None):
connection_type_str = connection.connection_type.name.lower()
self.log.info(f'Connected with {connection_type_str} {target_node}')
if is_feeler:
asyncio.create_task(connection.close())
return True
else:
(await session.close())
return False
except client_exceptions.ClientConnectorError as e:
self.log.info(f'{e}')
except ProtocolError as e:
if (connection is not None):
(await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, e.code))
if (e.code == Err.INVALID_HANDSHAKE):
self.log.warning(f'Invalid handshake with peer {target_node}. Maybe the peer is running old software.')
elif (e.code == Err.INCOMPATIBLE_NETWORK_ID):
self.log.warning('Incompatible network ID. Maybe the peer is on another network')
elif (e.code == Err.SELF_CONNECTION):
pass
else:
error_stack = traceback.format_exc()
self.log.error(f'Exception {e}, exception Stack: {error_stack}')
except Exception as e:
if (connection is not None):
(await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, Err.UNKNOWN))
error_stack = traceback.format_exc()
self.log.error(f'Exception {e}, exception Stack: {error_stack}')
if (session is not None):
(await session.close())
return False |
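
The outbound connection path above reduces to: open a TLS websocket with aiohttp, read the peer certificate off the transport, hash it to obtain the peer ID, then run the protocol handshake. A much-reduced sketch of just the websocket/SSL step, assuming an ssl.SSLContext has already been built; error handling, certificate hashing, and the handshake are omitted.

import ssl
from aiohttp import ClientSession, ClientTimeout

async def open_ws(host: str, port: int, ssl_context: ssl.SSLContext):
    # Open a wss:// connection with the same parameters start_client uses.
    session = ClientSession(timeout=ClientTimeout(total=30))
    try:
        ws = await session.ws_connect(
            f"wss://{host}:{port}/ws",
            autoclose=True,
            autoping=True,
            heartbeat=60,
            ssl=ssl_context,
            max_msg_size=50 * 1024 * 1024,
        )
        return session, ws
    except Exception:
        await session.close()
        raise
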
def from_numpy(X: np.ndarray, dist_type_schema: Dict[(int, str)]=None, lasso_beta: float=0.0, ridge_beta: float=0.0, use_bias: bool=False, hidden_layer_units: Iterable[int]=None, w_threshold: float=None, max_iter: int=100, tabu_edges: List[Tuple[(int, int)]]=None, tabu_parent_nodes: List[int]=None, tabu_child_nodes: List[int]=None, **kwargs) -> StructureModel:
'\n Learn the `StructureModel`, the graph structure with lasso regularisation\n describing conditional dependencies between variables in data presented as a numpy array.\n\n Based on DAGs with NO TEARS.\n @inproceedings{zheng2018dags,\n author = {Zheng, Xun and Aragam, Bryon and Ravikumar, Pradeep and Xing, Eric P.},\n booktitle = {Advances in Neural Information Processing Systems},\n title = {{DAGs with NO TEARS: Continuous Optimization for Structure Learning}},\n year = {2018},\n codebase = {https://github.com/xunzheng/notears}\n }\n\n Args:\n X: 2d input data, axis=0 is data rows, axis=1 is data columns. Data must be row oriented.\n\n dist_type_schema: The dist type schema corresponding to the passed in data X.\n It maps the positional column in X to the string alias of a dist type.\n A list of alias names can be found in ``dist_type/__init__.py``.\n If None, assumes that all data in X is continuous.\n\n lasso_beta: Constant that multiplies the lasso term (l1 regularisation).\n NOTE when using nonlinearities, the l1 loss only applies to the dag_layer.\n\n use_bias: Whether to fit a bias parameter in the NOTEARS algorithm.\n\n ridge_beta: Constant that multiplies the ridge term (l2 regularisation).\n When using nonlinear layers use of this parameter is recommended.\n\n hidden_layer_units: An iterable where its length determine the number of layers used,\n and the numbers determine the number of nodes used for the layer in order.\n\n w_threshold: fixed threshold for absolute edge weights.\n\n max_iter: max number of dual ascent steps during optimisation.\n\n tabu_edges: list of edges(from, to) not to be included in the graph.\n\n tabu_parent_nodes: list of nodes banned from being a parent of any other nodes.\n\n tabu_child_nodes: list of nodes banned from being a child of any other nodes.\n\n **kwargs: additional arguments for NOTEARS MLP model\n\n Returns:\n StructureModel: a graph of conditional dependencies between data variables.\n\n Raises:\n ValueError: If X does not contain data.\n ValueError: If schema does not correspond to columns.\n '
if (not X.size):
raise ValueError('Input data X is empty, cannot learn any structure')
logging.info("Learning structure using 'NOTEARS' optimisation.")
check_array(X)
if (dist_type_schema is not None):
if set(range(X.shape[1])).symmetric_difference(set(dist_type_schema.keys())):
raise ValueError('Difference indices and expected indices. Got {} schema'.format(dist_type_schema))
dist_types = ([DistTypeContinuous(idx=idx) for idx in np.arange(X.shape[1])] if (dist_type_schema is None) else [dist_type_aliases[alias](idx=idx) for (idx, alias) in dist_type_schema.items()])
(_, d) = X.shape
if (hidden_layer_units is None):
hidden_layer_units = [0]
elif (isinstance(hidden_layer_units, list) and (not hidden_layer_units)):
hidden_layer_units = [0]
hidden_layer_bnds = (hidden_layer_units[0] if hidden_layer_units[0] else 1)
bnds = [((0, 0) if (i == j) else ((0, 0) if ((tabu_edges is not None) and ((i, j) in tabu_edges)) else ((0, 0) if ((tabu_parent_nodes is not None) and (i in tabu_parent_nodes)) else ((0, 0) if ((tabu_child_nodes is not None) and (j in tabu_child_nodes)) else (None, None))))) for j in range(d) for _ in range(hidden_layer_bnds) for i in range(d)]
model = NotearsMLP(n_features=d, dist_types=dist_types, hidden_layer_units=hidden_layer_units, lasso_beta=lasso_beta, ridge_beta=ridge_beta, bounds=bnds, use_bias=use_bias, **kwargs)
model.fit(X, max_iter=max_iter)
sm = StructureModel(model.adj)
if w_threshold:
sm.remove_edges_below_threshold(w_threshold)
mean_effect = model.adj_mean_effect
for (u, v, edge_dict) in sm.edges.data(True):
sm.add_edge(u, v, origin='learned', weight=edge_dict['weight'], mean_effect=mean_effect[(u, v)])
bias = model.bias
for node in sm.nodes():
value = None
if (bias is not None):
value = bias[node]
sm.nodes[node]['bias'] = value
for dist_type in dist_types:
sm.nodes[dist_type.idx]['dist_type'] = dist_type
sm.graph['structure_learner'] = model
return sm | 2,413,458,060,328,271,400 | Learn the `StructureModel`, the graph structure with lasso regularisation
describing conditional dependencies between variables in data presented as a numpy array.
Based on DAGs with NO TEARS.
@inproceedings{zheng2018dags,
author = {Zheng, Xun and Aragam, Bryon and Ravikumar, Pradeep and Xing, Eric P.},
booktitle = {Advances in Neural Information Processing Systems},
title = {{DAGs with NO TEARS: Continuous Optimization for Structure Learning}},
year = {2018},
codebase = {https://github.com/xunzheng/notears}
}
Args:
X: 2d input data, axis=0 is data rows, axis=1 is data columns. Data must be row oriented.
dist_type_schema: The dist type schema corresponding to the passed in data X.
It maps the positional column in X to the string alias of a dist type.
A list of alias names can be found in ``dist_type/__init__.py``.
If None, assumes that all data in X is continuous.
lasso_beta: Constant that multiplies the lasso term (l1 regularisation).
NOTE when using nonlinearities, the l1 loss only applies to the dag_layer.
use_bias: Whether to fit a bias parameter in the NOTEARS algorithm.
ridge_beta: Constant that multiplies the ridge term (l2 regularisation).
When using nonlinear layers use of this parameter is recommended.
hidden_layer_units: An iterable whose length determines the number of layers used,
and whose values determine the number of nodes used for each layer, in order.
w_threshold: fixed threshold for absolute edge weights.
max_iter: max number of dual ascent steps during optimisation.
tabu_edges: list of edges(from, to) not to be included in the graph.
tabu_parent_nodes: list of nodes banned from being a parent of any other nodes.
tabu_child_nodes: list of nodes banned from being a child of any other nodes.
**kwargs: additional arguments for NOTEARS MLP model
Returns:
StructureModel: a graph of conditional dependencies between data variables.
Raises:
ValueError: If X does not contain data.
ValueError: If schema does not correspond to columns. | causalnex/structure/pytorch/notears.py | from_numpy | mkretsch327/causalnex | python | def from_numpy(X: np.ndarray, dist_type_schema: Dict[(int, str)]=None, lasso_beta: float=0.0, ridge_beta: float=0.0, use_bias: bool=False, hidden_layer_units: Iterable[int]=None, w_threshold: float=None, max_iter: int=100, tabu_edges: List[Tuple[(int, int)]]=None, tabu_parent_nodes: List[int]=None, tabu_child_nodes: List[int]=None, **kwargs) -> StructureModel:
'\n Learn the `StructureModel`, the graph structure with lasso regularisation\n describing conditional dependencies between variables in data presented as a numpy array.\n\n Based on DAGs with NO TEARS.\n @inproceedings{zheng2018dags,\n author = {Zheng, Xun and Aragam, Bryon and Ravikumar, Pradeep and Xing, Eric P.},\n booktitle = {Advances in Neural Information Processing Systems},\n title = {{DAGs with NO TEARS: Continuous Optimization for Structure Learning}},\n year = {2018},\n codebase = {https://github.com/xunzheng/notears}\n }\n\n Args:\n X: 2d input data, axis=0 is data rows, axis=1 is data columns. Data must be row oriented.\n\n dist_type_schema: The dist type schema corresponding to the passed in data X.\n It maps the positional column in X to the string alias of a dist type.\n A list of alias names can be found in ``dist_type/__init__.py``.\n If None, assumes that all data in X is continuous.\n\n lasso_beta: Constant that multiplies the lasso term (l1 regularisation).\n NOTE when using nonlinearities, the l1 loss only applies to the dag_layer.\n\n use_bias: Whether to fit a bias parameter in the NOTEARS algorithm.\n\n ridge_beta: Constant that multiplies the ridge term (l2 regularisation).\n When using nonlinear layers use of this parameter is recommended.\n\n hidden_layer_units: An iterable where its length determine the number of layers used,\n and the numbers determine the number of nodes used for the layer in order.\n\n w_threshold: fixed threshold for absolute edge weights.\n\n max_iter: max number of dual ascent steps during optimisation.\n\n tabu_edges: list of edges(from, to) not to be included in the graph.\n\n tabu_parent_nodes: list of nodes banned from being a parent of any other nodes.\n\n tabu_child_nodes: list of nodes banned from being a child of any other nodes.\n\n **kwargs: additional arguments for NOTEARS MLP model\n\n Returns:\n StructureModel: a graph of conditional dependencies between data variables.\n\n Raises:\n ValueError: If X does not contain data.\n ValueError: If schema does not correspond to columns.\n '
if (not X.size):
raise ValueError('Input data X is empty, cannot learn any structure')
logging.info("Learning structure using 'NOTEARS' optimisation.")
check_array(X)
if (dist_type_schema is not None):
if set(range(X.shape[1])).symmetric_difference(set(dist_type_schema.keys())):
raise ValueError('Difference indices and expected indices. Got {} schema'.format(dist_type_schema))
dist_types = ([DistTypeContinuous(idx=idx) for idx in np.arange(X.shape[1])] if (dist_type_schema is None) else [dist_type_aliases[alias](idx=idx) for (idx, alias) in dist_type_schema.items()])
(_, d) = X.shape
if (hidden_layer_units is None):
hidden_layer_units = [0]
elif (isinstance(hidden_layer_units, list) and (not hidden_layer_units)):
hidden_layer_units = [0]
hidden_layer_bnds = (hidden_layer_units[0] if hidden_layer_units[0] else 1)
bnds = [((0, 0) if (i == j) else ((0, 0) if ((tabu_edges is not None) and ((i, j) in tabu_edges)) else ((0, 0) if ((tabu_parent_nodes is not None) and (i in tabu_parent_nodes)) else ((0, 0) if ((tabu_child_nodes is not None) and (j in tabu_child_nodes)) else (None, None))))) for j in range(d) for _ in range(hidden_layer_bnds) for i in range(d)]
model = NotearsMLP(n_features=d, dist_types=dist_types, hidden_layer_units=hidden_layer_units, lasso_beta=lasso_beta, ridge_beta=ridge_beta, bounds=bnds, use_bias=use_bias, **kwargs)
model.fit(X, max_iter=max_iter)
sm = StructureModel(model.adj)
if w_threshold:
sm.remove_edges_below_threshold(w_threshold)
mean_effect = model.adj_mean_effect
for (u, v, edge_dict) in sm.edges.data(True):
sm.add_edge(u, v, origin='learned', weight=edge_dict['weight'], mean_effect=mean_effect[(u, v)])
bias = model.bias
for node in sm.nodes():
value = None
if (bias is not None):
value = bias[node]
sm.nodes[node]['bias'] = value
for dist_type in dist_types:
sm.nodes[dist_type.idx]['dist_type'] = dist_type
sm.graph['structure_learner'] = model
return sm |
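A minimal usage sketch for the record above, assuming the public import path causalnex.structure.pytorch.from_numpy; the synthetic data and hyperparameter values are illustrative only.
import numpy as np
from causalnex.structure.pytorch import from_numpy  # import path assumed from the repository layout

rng = np.random.default_rng(seed=0)
x0 = rng.normal(size=500)
x1 = 2.0 * x0 + rng.normal(size=500)        # ground truth: column 0 drives column 1
X = np.column_stack([x0, x1])

sm = from_numpy(X, lasso_beta=0.1, w_threshold=0.3, max_iter=100)
print(list(sm.edges(data=True)))            # expect an edge 0 -> 1 with a positive weight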
def from_pandas(X: pd.DataFrame, dist_type_schema: Dict[(Union[(str, int)], str)]=None, lasso_beta: float=0.0, ridge_beta: float=0.0, use_bias: bool=False, hidden_layer_units: Iterable[int]=None, max_iter: int=100, w_threshold: float=None, tabu_edges: List[Tuple[(str, str)]]=None, tabu_parent_nodes: List[str]=None, tabu_child_nodes: List[str]=None, **kwargs) -> StructureModel:
"\n Learn the `StructureModel`, the graph structure describing conditional dependencies between variables\n in data presented as a pandas dataframe.\n\n The optimisation is to minimise a score function :math:`F(W)` over the graph's\n weighted adjacency matrix, :math:`W`, subject to the a constraint function :math:`h(W)`,\n where :math:`h(W) == 0` characterises an acyclic graph.\n :math:`h(W) > 0` is a continuous, differentiable function that encapsulated how acyclic the graph is\n (less == more acyclic).\n Full details of this approach to structure learning are provided in the publication:\n\n Based on DAGs with NO TEARS.\n @inproceedings{zheng2018dags,\n author = {Zheng, Xun and Aragam, Bryon and Ravikumar, Pradeep and Xing, Eric P.},\n booktitle = {Advances in Neural Information Processing Systems},\n title = {{DAGs with NO TEARS: Continuous Optimization for Structure Learning}},\n year = {2018},\n codebase = {https://github.com/xunzheng/notears}\n }\n\n Args:\n X: 2d input data, axis=0 is data rows, axis=1 is data columns. Data must be row oriented.\n\n dist_type_schema: The dist type schema corresponding to the passed in data X.\n It maps the pandas column name in X to the string alias of a dist type.\n A list of alias names can be found in ``dist_type/__init__.py``.\n If None, assumes that all data in X is continuous.\n\n lasso_beta: Constant that multiplies the lasso term (l1 regularisation).\n NOTE when using nonlinearities, the l1 loss only applies to the dag_layer.\n\n use_bias: Whether to fit a bias parameter in the NOTEARS algorithm.\n\n ridge_beta: Constant that multiplies the ridge term (l2 regularisation).\n When using nonlinear layers use of this parameter is recommended.\n\n hidden_layer_units: An iterable where its length determine the number of layers used,\n and the numbers determine the number of nodes used for the layer in order.\n\n w_threshold: fixed threshold for absolute edge weights.\n\n max_iter: max number of dual ascent steps during optimisation.\n\n tabu_edges: list of edges(from, to) not to be included in the graph.\n\n tabu_parent_nodes: list of nodes banned from being a parent of any other nodes.\n\n tabu_child_nodes: list of nodes banned from being a child of any other nodes.\n\n **kwargs: additional arguments for NOTEARS MLP model\n\n Returns:\n StructureModel: graph of conditional dependencies between data variables.\n\n Raises:\n ValueError: If X does not contain data.\n "
data = deepcopy(X)
dist_type_schema = (dist_type_schema if (dist_type_schema is None) else {X.columns.get_loc(col): alias for (col, alias) in dist_type_schema.items()})
non_numeric_cols = data.select_dtypes(exclude='number').columns
if (len(non_numeric_cols) > 0):
raise ValueError('All columns must have numeric data. Consider mapping the following columns to int {non_numeric_cols}'.format(non_numeric_cols=non_numeric_cols))
col_idx = {c: i for (i, c) in enumerate(data.columns)}
idx_col = {i: c for (c, i) in col_idx.items()}
if tabu_edges:
tabu_edges = [(col_idx[u], col_idx[v]) for (u, v) in tabu_edges]
if tabu_parent_nodes:
tabu_parent_nodes = [col_idx[n] for n in tabu_parent_nodes]
if tabu_child_nodes:
tabu_child_nodes = [col_idx[n] for n in tabu_child_nodes]
g = from_numpy(X=data.values, dist_type_schema=dist_type_schema, lasso_beta=lasso_beta, ridge_beta=ridge_beta, use_bias=use_bias, hidden_layer_units=hidden_layer_units, w_threshold=w_threshold, max_iter=max_iter, tabu_edges=tabu_edges, tabu_parent_nodes=tabu_parent_nodes, tabu_child_nodes=tabu_child_nodes, **kwargs)
sm = StructureModel()
sm.add_nodes_from(data.columns)
for (u, v, edge_dict) in g.edges.data(True):
sm.add_edge(idx_col[u], idx_col[v], origin='learned', weight=edge_dict['weight'], mean_effect=edge_dict['mean_effect'])
for (key, val) in g.graph.items():
sm.graph[key] = val
for node in g.nodes(data=True):
node_name = idx_col[node[0]]
sm.nodes[node_name]['bias'] = node[1]['bias']
for node in g.nodes(data=True):
node_name = idx_col[node[0]]
sm.nodes[node_name]['dist_type'] = node[1]['dist_type']
return sm | -1,140,084,792,508,126,500 | Learn the `StructureModel`, the graph structure describing conditional dependencies between variables
in data presented as a pandas dataframe.
The optimisation is to minimise a score function :math:`F(W)` over the graph's
weighted adjacency matrix, :math:`W`, subject to a constraint function :math:`h(W)`,
where :math:`h(W) == 0` characterises an acyclic graph.
:math:`h(W) > 0` is a continuous, differentiable function that encapsulates how acyclic the graph is
(less == more acyclic).
Full details of this approach to structure learning are provided in the publication:
Based on DAGs with NO TEARS.
@inproceedings{zheng2018dags,
author = {Zheng, Xun and Aragam, Bryon and Ravikumar, Pradeep and Xing, Eric P.},
booktitle = {Advances in Neural Information Processing Systems},
title = {{DAGs with NO TEARS: Continuous Optimization for Structure Learning}},
year = {2018},
codebase = {https://github.com/xunzheng/notears}
}
Args:
X: 2d input data, axis=0 is data rows, axis=1 is data columns. Data must be row oriented.
dist_type_schema: The dist type schema corresponding to the passed in data X.
It maps the pandas column name in X to the string alias of a dist type.
A list of alias names can be found in ``dist_type/__init__.py``.
If None, assumes that all data in X is continuous.
lasso_beta: Constant that multiplies the lasso term (l1 regularisation).
NOTE when using nonlinearities, the l1 loss only applies to the dag_layer.
use_bias: Whether to fit a bias parameter in the NOTEARS algorithm.
ridge_beta: Constant that multiplies the ridge term (l2 regularisation).
When using nonlinear layers use of this parameter is recommended.
hidden_layer_units: An iterable whose length determines the number of layers used,
and whose values determine the number of nodes used for each layer, in order.
w_threshold: fixed threshold for absolute edge weights.
max_iter: max number of dual ascent steps during optimisation.
tabu_edges: list of edges(from, to) not to be included in the graph.
tabu_parent_nodes: list of nodes banned from being a parent of any other nodes.
tabu_child_nodes: list of nodes banned from being a child of any other nodes.
**kwargs: additional arguments for NOTEARS MLP model
Returns:
StructureModel: graph of conditional dependencies between data variables.
Raises:
ValueError: If X does not contain data. | causalnex/structure/pytorch/notears.py | from_pandas | mkretsch327/causalnex | python | def from_pandas(X: pd.DataFrame, dist_type_schema: Dict[(Union[(str, int)], str)]=None, lasso_beta: float=0.0, ridge_beta: float=0.0, use_bias: bool=False, hidden_layer_units: Iterable[int]=None, max_iter: int=100, w_threshold: float=None, tabu_edges: List[Tuple[(str, str)]]=None, tabu_parent_nodes: List[str]=None, tabu_child_nodes: List[str]=None, **kwargs) -> StructureModel:
"\n Learn the `StructureModel`, the graph structure describing conditional dependencies between variables\n in data presented as a pandas dataframe.\n\n The optimisation is to minimise a score function :math:`F(W)` over the graph's\n weighted adjacency matrix, :math:`W`, subject to the a constraint function :math:`h(W)`,\n where :math:`h(W) == 0` characterises an acyclic graph.\n :math:`h(W) > 0` is a continuous, differentiable function that encapsulated how acyclic the graph is\n (less == more acyclic).\n Full details of this approach to structure learning are provided in the publication:\n\n Based on DAGs with NO TEARS.\n @inproceedings{zheng2018dags,\n author = {Zheng, Xun and Aragam, Bryon and Ravikumar, Pradeep and Xing, Eric P.},\n booktitle = {Advances in Neural Information Processing Systems},\n title = {{DAGs with NO TEARS: Continuous Optimization for Structure Learning}},\n year = {2018},\n codebase = {https://github.com/xunzheng/notears}\n }\n\n Args:\n X: 2d input data, axis=0 is data rows, axis=1 is data columns. Data must be row oriented.\n\n dist_type_schema: The dist type schema corresponding to the passed in data X.\n It maps the pandas column name in X to the string alias of a dist type.\n A list of alias names can be found in ``dist_type/__init__.py``.\n If None, assumes that all data in X is continuous.\n\n lasso_beta: Constant that multiplies the lasso term (l1 regularisation).\n NOTE when using nonlinearities, the l1 loss only applies to the dag_layer.\n\n use_bias: Whether to fit a bias parameter in the NOTEARS algorithm.\n\n ridge_beta: Constant that multiplies the ridge term (l2 regularisation).\n When using nonlinear layers use of this parameter is recommended.\n\n hidden_layer_units: An iterable where its length determine the number of layers used,\n and the numbers determine the number of nodes used for the layer in order.\n\n w_threshold: fixed threshold for absolute edge weights.\n\n max_iter: max number of dual ascent steps during optimisation.\n\n tabu_edges: list of edges(from, to) not to be included in the graph.\n\n tabu_parent_nodes: list of nodes banned from being a parent of any other nodes.\n\n tabu_child_nodes: list of nodes banned from being a child of any other nodes.\n\n **kwargs: additional arguments for NOTEARS MLP model\n\n Returns:\n StructureModel: graph of conditional dependencies between data variables.\n\n Raises:\n ValueError: If X does not contain data.\n "
data = deepcopy(X)
dist_type_schema = (dist_type_schema if (dist_type_schema is None) else {X.columns.get_loc(col): alias for (col, alias) in dist_type_schema.items()})
non_numeric_cols = data.select_dtypes(exclude='number').columns
if (len(non_numeric_cols) > 0):
raise ValueError('All columns must have numeric data. Consider mapping the following columns to int {non_numeric_cols}'.format(non_numeric_cols=non_numeric_cols))
col_idx = {c: i for (i, c) in enumerate(data.columns)}
idx_col = {i: c for (c, i) in col_idx.items()}
if tabu_edges:
tabu_edges = [(col_idx[u], col_idx[v]) for (u, v) in tabu_edges]
if tabu_parent_nodes:
tabu_parent_nodes = [col_idx[n] for n in tabu_parent_nodes]
if tabu_child_nodes:
tabu_child_nodes = [col_idx[n] for n in tabu_child_nodes]
g = from_numpy(X=data.values, dist_type_schema=dist_type_schema, lasso_beta=lasso_beta, ridge_beta=ridge_beta, use_bias=use_bias, hidden_layer_units=hidden_layer_units, w_threshold=w_threshold, max_iter=max_iter, tabu_edges=tabu_edges, tabu_parent_nodes=tabu_parent_nodes, tabu_child_nodes=tabu_child_nodes, **kwargs)
sm = StructureModel()
sm.add_nodes_from(data.columns)
for (u, v, edge_dict) in g.edges.data(True):
sm.add_edge(idx_col[u], idx_col[v], origin='learned', weight=edge_dict['weight'], mean_effect=edge_dict['mean_effect'])
for (key, val) in g.graph.items():
sm.graph[key] = val
for node in g.nodes(data=True):
node_name = idx_col[node[0]]
sm.nodes[node_name]['bias'] = node[1]['bias']
for node in g.nodes(data=True):
node_name = idx_col[node[0]]
sm.nodes[node_name]['dist_type'] = node[1]['dist_type']
return sm |
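A companion sketch for the pandas wrapper above; the dist-type aliases ('cont', 'bin') and the column names are assumptions for illustration — the valid alias strings live in dist_type/__init__.py, as the docstring notes.
import pandas as pd
from causalnex.structure.pytorch import from_pandas  # import path assumed

df = pd.DataFrame({
    'age': [23, 45, 31, 52, 40],
    'smoker': [0, 1, 0, 1, 1],
    'risk': [0.1, 0.9, 0.3, 0.8, 0.7],
})
sm = from_pandas(
    df,
    dist_type_schema={'age': 'cont', 'smoker': 'bin', 'risk': 'cont'},  # alias names assumed
    tabu_child_nodes=['age'],   # forbid edges pointing into 'age'
    w_threshold=0.2,
)
print(list(sm.edges))           # edges are keyed by column name, not position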
def build_treeprocessors(md_instance, **kwargs):
' Build the default treeprocessors for Markdown. '
treeprocessors = odict.OrderedDict()
treeprocessors['inline'] = InlineProcessor(md_instance)
treeprocessors['prettify'] = PrettifyTreeprocessor(md_instance)
return treeprocessors | 2,951,572,762,128,452,600 | Build the default treeprocessors for Markdown. | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | build_treeprocessors | Con-Mi/lambda-packs | python | def build_treeprocessors(md_instance, **kwargs):
' '
treeprocessors = odict.OrderedDict()
treeprocessors['inline'] = InlineProcessor(md_instance)
treeprocessors['prettify'] = PrettifyTreeprocessor(md_instance)
return treeprocessors |
def isString(s):
" Check if it's string "
if (not isinstance(s, util.AtomicString)):
return isinstance(s, str)
return False | -7,252,295,634,798,910,000 | Check if it's string | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | isString | Con-Mi/lambda-packs | python | def isString(s):
" "
if (not isinstance(s, util.AtomicString)):
return isinstance(s, str)
return False |
def run(self, root):
'\n Subclasses of Treeprocessor should implement a `run` method, which\n takes a root ElementTree. This method can return another ElementTree \n object, and the existing root ElementTree will be replaced, or it can \n modify the current tree and return None.\n '
pass | -3,420,337,297,505,160,000 | Subclasses of Treeprocessor should implement a `run` method, which
takes a root ElementTree. This method can return another ElementTree
object, and the existing root ElementTree will be replaced, or it can
modify the current tree and return None. | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | run | Con-Mi/lambda-packs | python | def run(self, root):
'\n Subclasses of Treeprocessor should implement a `run` method, which\n takes a root ElementTree. This method can return another ElementTree \n object, and the existing root ElementTree will be replaced, or it can \n modify the current tree and return None.\n '
pass |
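A sketch of the subclassing contract described above, using the odict-based 2.x extension API this file appears to target; the extension name and the registration position string are assumptions.
import markdown
from markdown.extensions import Extension
from markdown.treeprocessors import Treeprocessor

class ShoutTreeprocessor(Treeprocessor):
    def run(self, root):
        # modify the tree in place and return None, as the docstring allows
        for elem in root.iter():
            if elem.text:
                elem.text = elem.text.upper()

class ShoutExtension(Extension):
    def extendMarkdown(self, md, md_globals):
        md.treeprocessors.add('shout', ShoutTreeprocessor(md), '_end')  # odict position string assumed

print(markdown.markdown('hello *world*', extensions=[ShoutExtension()]))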
def __makePlaceholder(self, type):
' Generate a placeholder '
id = ('%04d' % len(self.stashed_nodes))
hash = (util.INLINE_PLACEHOLDER % id)
return (hash, id) | -8,206,074,628,519,016,000 | Generate a placeholder | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | __makePlaceholder | Con-Mi/lambda-packs | python | def __makePlaceholder(self, type):
' '
id = ('%04d' % len(self.stashed_nodes))
hash = (util.INLINE_PLACEHOLDER % id)
return (hash, id) |
def __findPlaceholder(self, data, index):
'\n Extract id from data string, start from index\n\n Keyword arguments:\n\n * data: string\n * index: index, from which we start search\n\n Returns: placeholder id and string index, after the found placeholder.\n \n '
m = self.__placeholder_re.search(data, index)
if m:
return (m.group(1), m.end())
else:
return (None, (index + 1)) | -6,930,052,867,000,450,000 | Extract id from data string, start from index
Keyword arguments:
* data: string
* index: index, from which we start search
Returns: placeholder id and string index, after the found placeholder. | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | __findPlaceholder | Con-Mi/lambda-packs | python | def __findPlaceholder(self, data, index):
'\n Extract id from data string, start from index\n\n Keyword arguments:\n\n * data: string\n * index: index, from which we start search\n\n Returns: placeholder id and string index, after the found placeholder.\n \n '
m = self.__placeholder_re.search(data, index)
if m:
return (m.group(1), m.end())
else:
return (None, (index + 1)) |
def __stashNode(self, node, type):
' Add node to stash '
(placeholder, id) = self.__makePlaceholder(type)
self.stashed_nodes[id] = node
return placeholder | 321,642,995,561,102,700 | Add node to stash | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | __stashNode | Con-Mi/lambda-packs | python | def __stashNode(self, node, type):
' '
(placeholder, id) = self.__makePlaceholder(type)
self.stashed_nodes[id] = node
return placeholder |
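The two helpers above implement a stash-and-placeholder scheme; the standalone sketch below mirrors it, with the STX/ETX-delimited placeholder format assumed to match the constants in markdown.util.
import re

INLINE_PLACEHOLDER_PREFIX = '\u0002wzxhzdk:'                 # assumed to mirror util.INLINE_PLACEHOLDER_PREFIX
INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + '%s\u0003'
PLACEHOLDER_RE = re.compile(INLINE_PLACEHOLDER % r'([0-9]{4})')

stashed_nodes = {}

def stash(node):
    node_id = '%04d' % len(stashed_nodes)                     # same zero-padded id scheme as __makePlaceholder
    stashed_nodes[node_id] = node
    return INLINE_PLACEHOLDER % node_id

text = 'before ' + stash('<em>node</em>') + ' after'
match = PLACEHOLDER_RE.search(text)
print(match.group(1), stashed_nodes[match.group(1)])          # -> 0000 <em>node</em>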
def __handleInline(self, data, patternIndex=0):
'\n Process string with inline patterns and replace it\n with placeholders\n\n Keyword arguments:\n\n * data: A line of Markdown text\n * patternIndex: The index of the inlinePattern to start with\n\n Returns: String with placeholders.\n\n '
if (not isinstance(data, util.AtomicString)):
startIndex = 0
while (patternIndex < len(self.markdown.inlinePatterns)):
(data, matched, startIndex) = self.__applyPattern(self.markdown.inlinePatterns.value_for_index(patternIndex), data, patternIndex, startIndex)
if (not matched):
patternIndex += 1
return data | 6,206,016,853,998,460,000 | Process string with inline patterns and replace it
with placeholders
Keyword arguments:
* data: A line of Markdown text
* patternIndex: The index of the inlinePattern to start with
Returns: String with placeholders. | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | __handleInline | Con-Mi/lambda-packs | python | def __handleInline(self, data, patternIndex=0):
'\n Process string with inline patterns and replace it\n with placeholders\n\n Keyword arguments:\n\n * data: A line of Markdown text\n * patternIndex: The index of the inlinePattern to start with\n\n Returns: String with placeholders.\n\n '
if (not isinstance(data, util.AtomicString)):
startIndex = 0
while (patternIndex < len(self.markdown.inlinePatterns)):
(data, matched, startIndex) = self.__applyPattern(self.markdown.inlinePatterns.value_for_index(patternIndex), data, patternIndex, startIndex)
if (not matched):
patternIndex += 1
return data |
def __processElementText(self, node, subnode, isText=True):
"\n Process placeholders in Element.text or Element.tail\n of Elements popped from self.stashed_nodes.\n\n Keywords arguments:\n\n * node: parent node\n * subnode: processing node\n * isText: bool variable, True - it's text, False - it's tail\n\n Returns: None\n\n "
if isText:
text = subnode.text
subnode.text = None
else:
text = subnode.tail
subnode.tail = None
childResult = self.__processPlaceholders(text, subnode)
if ((not isText) and (node is not subnode)):
pos = node.getchildren().index(subnode)
node.remove(subnode)
else:
pos = 0
childResult.reverse()
for newChild in childResult:
node.insert(pos, newChild) | 73,273,833,939,332,340 | Process placeholders in Element.text or Element.tail
of Elements popped from self.stashed_nodes.
Keyword arguments:
* node: parent node
* subnode: processing node
* isText: bool variable, True - it's text, False - it's tail
Returns: None | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | __processElementText | Con-Mi/lambda-packs | python | def __processElementText(self, node, subnode, isText=True):
"\n Process placeholders in Element.text or Element.tail\n of Elements popped from self.stashed_nodes.\n\n Keywords arguments:\n\n * node: parent node\n * subnode: processing node\n * isText: bool variable, True - it's text, False - it's tail\n\n Returns: None\n\n "
if isText:
text = subnode.text
subnode.text = None
else:
text = subnode.tail
subnode.tail = None
childResult = self.__processPlaceholders(text, subnode)
if ((not isText) and (node is not subnode)):
pos = node.getchildren().index(subnode)
node.remove(subnode)
else:
pos = 0
childResult.reverse()
for newChild in childResult:
node.insert(pos, newChild) |
def __processPlaceholders(self, data, parent):
'\n Process string with placeholders and generate ElementTree tree.\n\n Keyword arguments:\n\n * data: string with placeholders instead of ElementTree elements.\n * parent: Element, which contains processing inline data\n\n Returns: list with ElementTree elements with applied inline patterns.\n \n '
def linkText(text):
if text:
if result:
if result[(- 1)].tail:
result[(- 1)].tail += text
else:
result[(- 1)].tail = text
elif parent.text:
parent.text += text
else:
parent.text = text
result = []
strartIndex = 0
while data:
index = data.find(self.__placeholder_prefix, strartIndex)
if (index != (- 1)):
(id, phEndIndex) = self.__findPlaceholder(data, index)
if (id in self.stashed_nodes):
node = self.stashed_nodes.get(id)
if (index > 0):
text = data[strartIndex:index]
linkText(text)
if (not isString(node)):
for child in ([node] + node.getchildren()):
if child.tail:
if child.tail.strip():
self.__processElementText(node, child, False)
if child.text:
if child.text.strip():
self.__processElementText(child, child)
else:
linkText(node)
strartIndex = phEndIndex
continue
strartIndex = phEndIndex
result.append(node)
else:
end = (index + len(self.__placeholder_prefix))
linkText(data[strartIndex:end])
strartIndex = end
else:
text = data[strartIndex:]
if isinstance(data, util.AtomicString):
text = util.AtomicString(text)
linkText(text)
data = ''
return result | 4,380,680,562,387,093,500 | Process string with placeholders and generate ElementTree tree.
Keyword arguments:
* data: string with placeholders instead of ElementTree elements.
* parent: Element, which contains processing inline data
Returns: list with ElementTree elements with applied inline patterns. | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | __processPlaceholders | Con-Mi/lambda-packs | python | def __processPlaceholders(self, data, parent):
'\n Process string with placeholders and generate ElementTree tree.\n\n Keyword arguments:\n\n * data: string with placeholders instead of ElementTree elements.\n * parent: Element, which contains processing inline data\n\n Returns: list with ElementTree elements with applied inline patterns.\n \n '
def linkText(text):
if text:
if result:
if result[(- 1)].tail:
result[(- 1)].tail += text
else:
result[(- 1)].tail = text
elif parent.text:
parent.text += text
else:
parent.text = text
result = []
strartIndex = 0
while data:
index = data.find(self.__placeholder_prefix, strartIndex)
if (index != (- 1)):
(id, phEndIndex) = self.__findPlaceholder(data, index)
if (id in self.stashed_nodes):
node = self.stashed_nodes.get(id)
if (index > 0):
text = data[strartIndex:index]
linkText(text)
if (not isString(node)):
for child in ([node] + node.getchildren()):
if child.tail:
if child.tail.strip():
self.__processElementText(node, child, False)
if child.text:
if child.text.strip():
self.__processElementText(child, child)
else:
linkText(node)
strartIndex = phEndIndex
continue
strartIndex = phEndIndex
result.append(node)
else:
end = (index + len(self.__placeholder_prefix))
linkText(data[strartIndex:end])
strartIndex = end
else:
text = data[strartIndex:]
if isinstance(data, util.AtomicString):
text = util.AtomicString(text)
linkText(text)
data = ''
return result |
def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
'\n Check if the line fits the pattern, create the necessary\n elements, add it to stashed_nodes.\n\n Keyword arguments:\n\n * data: the text to be processed\n * pattern: the pattern to be checked\n * patternIndex: index of current pattern\n * startIndex: string index, from which we start searching\n\n Returns: String with placeholders instead of ElementTree elements.\n\n '
match = pattern.getCompiledRegExp().match(data[startIndex:])
leftData = data[:startIndex]
if (not match):
return (data, False, 0)
node = pattern.handleMatch(match)
if (node is None):
return (data, True, (len(leftData) + match.span(len(match.groups()))[0]))
if (not isString(node)):
if (not isinstance(node.text, util.AtomicString)):
for child in ([node] + node.getchildren()):
if (not isString(node)):
if child.text:
child.text = self.__handleInline(child.text, (patternIndex + 1))
if child.tail:
child.tail = self.__handleInline(child.tail, patternIndex)
placeholder = self.__stashNode(node, pattern.type())
return (('%s%s%s%s' % (leftData, match.group(1), placeholder, match.groups()[(- 1)])), True, 0) | -3,648,883,033,776,715,300 | Check if the line fits the pattern, create the necessary
elements, add it to stashed_nodes.
Keyword arguments:
* data: the text to be processed
* pattern: the pattern to be checked
* patternIndex: index of current pattern
* startIndex: string index, from which we start searching
Returns: String with placeholders instead of ElementTree elements. | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | __applyPattern | Con-Mi/lambda-packs | python | def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
'\n Check if the line fits the pattern, create the necessary\n elements, add it to stashed_nodes.\n\n Keyword arguments:\n\n * data: the text to be processed\n * pattern: the pattern to be checked\n * patternIndex: index of current pattern\n * startIndex: string index, from which we start searching\n\n Returns: String with placeholders instead of ElementTree elements.\n\n '
match = pattern.getCompiledRegExp().match(data[startIndex:])
leftData = data[:startIndex]
if (not match):
return (data, False, 0)
node = pattern.handleMatch(match)
if (node is None):
return (data, True, (len(leftData) + match.span(len(match.groups()))[0]))
if (not isString(node)):
if (not isinstance(node.text, util.AtomicString)):
for child in ([node] + node.getchildren()):
if (not isString(node)):
if child.text:
child.text = self.__handleInline(child.text, (patternIndex + 1))
if child.tail:
child.tail = self.__handleInline(child.tail, patternIndex)
placeholder = self.__stashNode(node, pattern.type())
return (('%s%s%s%s' % (leftData, match.group(1), placeholder, match.groups()[(- 1)])), True, 0) |
def run(self, tree):
'Apply inline patterns to a parsed Markdown tree.\n\n Iterate over ElementTree, find elements with inline tag, apply inline\n patterns and append newly created Elements to tree. If you don\'t\n want to process your data with inline paterns, instead of normal string,\n use subclass AtomicString:\n\n node.text = markdown.AtomicString("This will not be processed.")\n\n Arguments:\n\n * tree: ElementTree object, representing Markdown tree.\n\n Returns: ElementTree object with applied inline patterns.\n\n '
self.stashed_nodes = {}
stack = [tree]
while stack:
currElement = stack.pop()
insertQueue = []
for child in currElement.getchildren():
if (child.text and (not isinstance(child.text, util.AtomicString))):
text = child.text
child.text = None
lst = self.__processPlaceholders(self.__handleInline(text), child)
stack += lst
insertQueue.append((child, lst))
if child.tail:
tail = self.__handleInline(child.tail)
dumby = util.etree.Element('d')
tailResult = self.__processPlaceholders(tail, dumby)
if dumby.text:
child.tail = dumby.text
else:
child.tail = None
pos = (currElement.getchildren().index(child) + 1)
tailResult.reverse()
for newChild in tailResult:
currElement.insert(pos, newChild)
if child.getchildren():
stack.append(child)
for (element, lst) in insertQueue:
if self.markdown.enable_attributes:
if element.text:
element.text = inlinepatterns.handleAttributes(element.text, element)
i = 0
for newChild in lst:
if self.markdown.enable_attributes:
if newChild.tail:
newChild.tail = inlinepatterns.handleAttributes(newChild.tail, element)
if newChild.text:
newChild.text = inlinepatterns.handleAttributes(newChild.text, newChild)
element.insert(i, newChild)
i += 1
return tree | 8,504,789,209,487,850,000 | Apply inline patterns to a parsed Markdown tree.
Iterate over ElementTree, find elements with inline tag, apply inline
patterns and append newly created Elements to tree. If you don't
want to process your data with inline patterns, instead of a normal string,
use subclass AtomicString:
node.text = markdown.AtomicString("This will not be processed.")
Arguments:
* tree: ElementTree object, representing Markdown tree.
Returns: ElementTree object with applied inline patterns. | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | run | Con-Mi/lambda-packs | python | def run(self, tree):
'Apply inline patterns to a parsed Markdown tree.\n\n Iterate over ElementTree, find elements with inline tag, apply inline\n patterns and append newly created Elements to tree. If you don\'t\n want to process your data with inline paterns, instead of normal string,\n use subclass AtomicString:\n\n node.text = markdown.AtomicString("This will not be processed.")\n\n Arguments:\n\n * tree: ElementTree object, representing Markdown tree.\n\n Returns: ElementTree object with applied inline patterns.\n\n '
self.stashed_nodes = {}
stack = [tree]
while stack:
currElement = stack.pop()
insertQueue = []
for child in currElement.getchildren():
if (child.text and (not isinstance(child.text, util.AtomicString))):
text = child.text
child.text = None
lst = self.__processPlaceholders(self.__handleInline(text), child)
stack += lst
insertQueue.append((child, lst))
if child.tail:
tail = self.__handleInline(child.tail)
dumby = util.etree.Element('d')
tailResult = self.__processPlaceholders(tail, dumby)
if dumby.text:
child.tail = dumby.text
else:
child.tail = None
pos = (currElement.getchildren().index(child) + 1)
tailResult.reverse()
for newChild in tailResult:
currElement.insert(pos, newChild)
if child.getchildren():
stack.append(child)
for (element, lst) in insertQueue:
if self.markdown.enable_attributes:
if element.text:
element.text = inlinepatterns.handleAttributes(element.text, element)
i = 0
for newChild in lst:
if self.markdown.enable_attributes:
if newChild.tail:
newChild.tail = inlinepatterns.handleAttributes(newChild.tail, element)
if newChild.text:
newChild.text = inlinepatterns.handleAttributes(newChild.text, newChild)
element.insert(i, newChild)
i += 1
return tree |
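For orientation, a quick end-to-end check: the inline treeprocessor above is what replaces the stashed placeholders with elements when the library renders a document (expected output shown as a comment, assuming default settings).
import markdown

print(markdown.markdown('Some *emphasised* text with `code`'))
# expected along the lines of: <p>Some <em>emphasised</em> text with <code>code</code></p>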
def _prettifyETree(self, elem):
' Recursively add linebreaks to ElementTree children. '
i = '\n'
if (util.isBlockLevel(elem.tag) and (elem.tag not in ['code', 'pre'])):
if (((not elem.text) or (not elem.text.strip())) and len(elem) and util.isBlockLevel(elem[0].tag)):
elem.text = i
for e in elem:
if util.isBlockLevel(e.tag):
self._prettifyETree(e)
if ((not elem.tail) or (not elem.tail.strip())):
elem.tail = i
if ((not elem.tail) or (not elem.tail.strip())):
elem.tail = i | -4,502,805,878,882,343,000 | Recursively add linebreaks to ElementTree children. | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | _prettifyETree | Con-Mi/lambda-packs | python | def _prettifyETree(self, elem):
' '
i = '\n'
if (util.isBlockLevel(elem.tag) and (elem.tag not in ['code', 'pre'])):
if (((not elem.text) or (not elem.text.strip())) and len(elem) and util.isBlockLevel(elem[0].tag)):
elem.text = i
for e in elem:
if util.isBlockLevel(e.tag):
self._prettifyETree(e)
if ((not elem.tail) or (not elem.tail.strip())):
elem.tail = i
if ((not elem.tail) or (not elem.tail.strip())):
elem.tail = i |
def run(self, root):
' Add linebreaks to ElementTree root object. '
self._prettifyETree(root)
brs = root.getiterator('br')
for br in brs:
if ((not br.tail) or (not br.tail.strip())):
br.tail = '\n'
else:
br.tail = ('\n%s' % br.tail) | -3,843,561,070,745,513,500 | Add linebreaks to ElementTree root object. | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | run | Con-Mi/lambda-packs | python | def run(self, root):
' '
self._prettifyETree(root)
brs = root.getiterator('br')
for br in brs:
if ((not br.tail) or (not br.tail.strip())):
br.tail = '\n'
else:
br.tail = ('\n%s' % br.tail) |
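A self-contained sketch of the prettify pass above: block-level elements receive newline text/tails so the serialised output is line-broken. The block-level tag set here is a small assumed subset of what util.isBlockLevel checks.
import xml.etree.ElementTree as etree

BLOCK_LEVEL = {'div', 'p', 'ul', 'li', 'pre', 'blockquote'}   # assumed subset

def prettify(elem):
    if elem.tag in BLOCK_LEVEL and elem.tag not in ('code', 'pre'):
        if (not elem.text or not elem.text.strip()) and len(elem):
            elem.text = '\n'
        for child in elem:
            if child.tag in BLOCK_LEVEL:
                prettify(child)
        if not elem.tail or not elem.tail.strip():
            elem.tail = '\n'

root = etree.fromstring('<div><p>one</p><p>two</p></div>')
prettify(root)
print(etree.tostring(root).decode())    # <div>\n<p>one</p>\n<p>two</p>\n</div>\n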
def add_goal_to_payload(func):
"Adds a goal to payload for 't-goal' key."
@functools.wraps(func)
def wrapper_inject_goal(ack: Ack, payload: Dict[(str, Any)], context: BoltContext):
try:
content = GoalContent(content=payload['state']['values'][CREATE_GOAL_INPUT_BLOCK][CREATE_GOAL_INPUT]['value']).content
except ValidationError as error:
return ack(response_action='errors', errors={CREATE_GOAL_INPUT_BLOCK: error.errors()[0]['msg']})
payload['t-goal'] = content
return func(ack=ack, payload=payload, context=context)
return wrapper_inject_goal | -3,697,957,113,407,593,000 | Adds a goal to payload for 't-goal' key. | teamiclink/slack/view_goal_create.py | add_goal_to_payload | e1004/teamiclink | python | def add_goal_to_payload(func):
@functools.wraps(func)
def wrapper_inject_goal(ack: Ack, payload: Dict[(str, Any)], context: BoltContext):
try:
content = GoalContent(content=payload['state']['values'][CREATE_GOAL_INPUT_BLOCK][CREATE_GOAL_INPUT]['value']).content
except ValidationError as error:
return ack(response_action='errors', errors={CREATE_GOAL_INPUT_BLOCK: error.errors()[0]['msg']})
payload['t-goal'] = content
return func(ack=ack, payload=payload, context=context)
return wrapper_inject_goal |
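A sketch of how the decorator above is intended to sit on a Bolt view-submission handler; the callback id, credentials, and save_goal helper are hypothetical — only the decorator comes from the record above.
from slack_bolt import App

app = App(token='xoxb-...', signing_secret='...')          # placeholder credentials

@app.view('create_goal_view')                              # hypothetical callback id
@add_goal_to_payload
def handle_goal_submission(ack, payload, context):
    ack()                                                  # validation already succeeded in the wrapper
    save_goal(team_id=context['team_id'], content=payload['t-goal'])   # hypothetical persistence helper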
def download_and_append_fragments_multiple(self, *args, pack_func=None, finish_func=None):
'\n @params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...\n all args must be either tuple or list\n '
interrupt_trigger = [True]
max_progress = len(args)
if (max_progress == 1):
return self.download_and_append_fragments(*args[0], pack_func=pack_func, finish_func=finish_func)
max_workers = self.params.get('concurrent_fragment_downloads', 1)
if (max_progress > 1):
self._prepare_multiline_status(max_progress)
is_live = any(traverse_obj(args, (..., 2, 'is_live'), default=[]))
def thread_func(idx, ctx, fragments, info_dict, tpe):
ctx['max_progress'] = max_progress
ctx['progress_idx'] = idx
return self.download_and_append_fragments(ctx, fragments, info_dict, pack_func=pack_func, finish_func=finish_func, tpe=tpe, interrupt_trigger=interrupt_trigger)
class FTPE(concurrent.futures.ThreadPoolExecutor):
def __exit__(self, exc_type, exc_val, exc_tb):
pass
if (compat_os_name == 'nt'):
def future_result(future):
while True:
try:
return future.result(0.1)
except KeyboardInterrupt:
raise
except concurrent.futures.TimeoutError:
continue
else:
def future_result(future):
return future.result()
def interrupt_trigger_iter(fg):
for f in fg:
if (not interrupt_trigger[0]):
break
(yield f)
spins = []
for (idx, (ctx, fragments, info_dict)) in enumerate(args):
tpe = FTPE(math.ceil((max_workers / max_progress)))
job = tpe.submit(thread_func, idx, ctx, interrupt_trigger_iter(fragments), info_dict, tpe)
spins.append((tpe, job))
result = True
for (tpe, job) in spins:
try:
result = (result and future_result(job))
except KeyboardInterrupt:
interrupt_trigger[0] = False
finally:
tpe.shutdown(wait=True)
if ((not interrupt_trigger[0]) and (not is_live)):
raise KeyboardInterrupt()
return result | 3,892,368,910,339,873,300 | @params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...
all args must be either tuple or list | yt_dlp/downloader/fragment.py | download_and_append_fragments_multiple | 9Fork/yt-dlp | python | def download_and_append_fragments_multiple(self, *args, pack_func=None, finish_func=None):
'\n @params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...\n all args must be either tuple or list\n '
interrupt_trigger = [True]
max_progress = len(args)
if (max_progress == 1):
return self.download_and_append_fragments(*args[0], pack_func=pack_func, finish_func=finish_func)
max_workers = self.params.get('concurrent_fragment_downloads', 1)
if (max_progress > 1):
self._prepare_multiline_status(max_progress)
is_live = any(traverse_obj(args, (..., 2, 'is_live'), default=[]))
def thread_func(idx, ctx, fragments, info_dict, tpe):
ctx['max_progress'] = max_progress
ctx['progress_idx'] = idx
return self.download_and_append_fragments(ctx, fragments, info_dict, pack_func=pack_func, finish_func=finish_func, tpe=tpe, interrupt_trigger=interrupt_trigger)
class FTPE(concurrent.futures.ThreadPoolExecutor):
def __exit__(self, exc_type, exc_val, exc_tb):
pass
if (compat_os_name == 'nt'):
def future_result(future):
while True:
try:
return future.result(0.1)
except KeyboardInterrupt:
raise
except concurrent.futures.TimeoutError:
continue
else:
def future_result(future):
return future.result()
def interrupt_trigger_iter(fg):
for f in fg:
if (not interrupt_trigger[0]):
break
(yield f)
spins = []
for (idx, (ctx, fragments, info_dict)) in enumerate(args):
tpe = FTPE(math.ceil((max_workers / max_progress)))
job = tpe.submit(thread_func, idx, ctx, interrupt_trigger_iter(fragments), info_dict, tpe)
spins.append((tpe, job))
result = True
for (tpe, job) in spins:
try:
result = (result and future_result(job))
except KeyboardInterrupt:
interrupt_trigger[0] = False
finally:
tpe.shutdown(wait=True)
if ((not interrupt_trigger[0]) and (not is_live)):
raise KeyboardInterrupt()
return result |
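A stripped-down sketch of the scheduling idea above, independent of the yt-dlp internals: each format gets its own small thread pool carved out of max_workers, and a shared flag lets an interrupt stop the fragment generators early.
import concurrent.futures
import math

def download_all(format_jobs, max_workers=8):
    # format_jobs: list of (download_fn, fragment_iterable) pairs, one per format
    interrupt = [True]

    def guarded(fragments):
        for frag in fragments:
            if not interrupt[0]:
                break
            yield frag

    pools = []
    for download_fn, fragments in format_jobs:
        pool = concurrent.futures.ThreadPoolExecutor(math.ceil(max_workers / len(format_jobs)))
        pools.append((pool, pool.submit(download_fn, guarded(fragments))))

    ok = True
    for pool, job in pools:
        try:
            ok = ok and job.result()
        except KeyboardInterrupt:
            interrupt[0] = False
        finally:
            pool.shutdown(wait=True)
    return ok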
def createFiles(self, fileContents, path, repMap=None, repMaps=None):
'repMap: single map for all files\n repMaps: a dict, with the filenames as the keys'
if ((repMap is not None) and (repMaps is not None)):
raise AllInOneError('createFiles can only take repMap or repMaps (or neither), not both')
result = []
for fileName in fileContents:
filePath = os.path.join(path, fileName)
result.append(filePath)
for (i, filePathi) in enumerate(addIndex(filePath, self.NJobs)):
theFile = open(filePathi, 'w')
fileContentsi = fileContents[fileName]
if (repMaps is not None):
repMap = repMaps[fileName]
if (repMap is not None):
repMap.update({'nIndex': str(i)})
fileContentsi = replaceByMap(fileContentsi, repMap)
theFile.write(fileContentsi)
theFile.close()
return result | -1,049,011,689,998,130,600 | repMap: single map for all files
repMaps: a dict, with the filenames as the keys | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | createFiles | 4quarks/cmssw | python | def createFiles(self, fileContents, path, repMap=None, repMaps=None):
'repMap: single map for all files\n repMaps: a dict, with the filenames as the keys'
if ((repMap is not None) and (repMaps is not None)):
raise AllInOneError('createFiles can only take repMap or repMaps (or neither), not both')
result = []
for fileName in fileContents:
filePath = os.path.join(path, fileName)
result.append(filePath)
for (i, filePathi) in enumerate(addIndex(filePath, self.NJobs)):
theFile = open(filePathi, 'w')
fileContentsi = fileContents[fileName]
if (repMaps is not None):
repMap = repMaps[fileName]
if (repMap is not None):
repMap.update({'nIndex': str(i)})
fileContentsi = replaceByMap(fileContentsi, repMap)
theFile.write(fileContentsi)
theFile.close()
return result |
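A simplified stand-in for the templating used above: '.oO[key]Oo.' tokens in the file contents are replaced per parallel job, and the nIndex entry is what distinguishes the per-job copies. replace_by_map below is a toy version of the real replaceByMap helper, not the actual implementation.
def replace_by_map(text, rep_map):
    for key, value in rep_map.items():
        text = text.replace('.oO[%s]Oo.' % key, str(value))
    return text

template = 'process.maxEvents = .oO[nEvents]Oo.  # job .oO[nIndex]Oo.'
n_jobs = 3
for i in range(n_jobs):
    content = replace_by_map(template, {'nEvents': 10000, 'nIndex': i})
    print('validation_%s.py:' % i, content)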
def __init__(self, valName, alignment, config):
'\n This method adds additional items to the `self.general` dictionary\n which are only needed for validations using datasets.\n \n Arguments:\n - `valName`: String which identifies individual validation instances\n - `alignment`: `Alignment` instance to validate\n - `config`: `BetterConfigParser` instance which includes the\n configuration of the validations\n '
super(GenericValidationData, self).__init__(valName, alignment, config)
if ((int(self.general['maxevents']) < 0) and (self.NJobs > 1)):
msg = 'Maximum number of events (maxevents) not specified: cannot use parallel jobs.'
raise AllInOneError(msg)
if ((int(self.general['maxevents']) / self.NJobs) != (float(self.general['maxevents']) / self.NJobs)):
msg = 'maxevents has to be divisible by parallelJobs'
raise AllInOneError(msg)
tryPredefinedFirst = ((not (self.jobmode.split(',')[0] == 'crab')) and (self.general['JSON'] == '') and (self.general['firstRun'] == '') and (self.general['lastRun'] == '') and (self.general['begin'] == '') and (self.general['end'] == ''))
if (self.general['dataset'] not in globalDictionaries.usedDatasets):
globalDictionaries.usedDatasets[self.general['dataset']] = {}
if (self.cmssw not in globalDictionaries.usedDatasets[self.general['dataset']]):
if (globalDictionaries.usedDatasets[self.general['dataset']] != {}):
print(("Warning: you use the same dataset '%s' in multiple cmssw releases.\nThis is allowed, but make sure it's not a mistake" % self.general['dataset']))
globalDictionaries.usedDatasets[self.general['dataset']][self.cmssw] = {False: None, True: None}
Bfield = self.general.get('magneticfield', None)
if (globalDictionaries.usedDatasets[self.general['dataset']][self.cmssw][tryPredefinedFirst] is None):
dataset = Dataset(self.general['dataset'], tryPredefinedFirst=tryPredefinedFirst, cmssw=self.cmssw, cmsswrelease=self.cmsswreleasebase, magneticfield=Bfield, dasinstance=self.general['dasinstance'])
globalDictionaries.usedDatasets[self.general['dataset']][self.cmssw][tryPredefinedFirst] = dataset
if (tryPredefinedFirst and (not dataset.predefined())):
globalDictionaries.usedDatasets[self.general['dataset']][self.cmssw][False] = dataset
self.dataset = globalDictionaries.usedDatasets[self.general['dataset']][self.cmssw][tryPredefinedFirst]
self.general['magneticField'] = self.dataset.magneticField()
self.general['defaultMagneticField'] = 'MagneticField'
if (self.general['magneticField'] == 'unknown'):
print('Could not get the magnetic field for this dataset.')
print('Using the default: ', self.general['defaultMagneticField'])
self.general['magneticField'] = '.oO[defaultMagneticField]Oo.'
if (not (self.jobmode.split(',')[0] == 'crab')):
try:
self.general['datasetDefinition'] = self.dataset.datasetSnippet(jsonPath=self.general['JSON'], firstRun=self.general['firstRun'], lastRun=self.general['lastRun'], begin=self.general['begin'], end=self.general['end'], parent=self.needParentFiles)
except AllInOneError as e:
msg = ('In section [%s:%s]: ' % (self.valType, self.name))
msg += str(e)
raise AllInOneError(msg)
else:
if self.dataset.predefined():
msg = ("For jobmode 'crab' you cannot use predefined datasets (in your case: '%s')." % self.dataset.name())
raise AllInOneError(msg)
try:
theUpdate = config.getResultingSection(((self.valType + ':') + self.name), demandPars=['parallelJobs'])
except AllInOneError as e:
msg = (str(e)[:(- 1)] + " when using 'jobmode: crab'.")
raise AllInOneError(msg)
self.general.update(theUpdate)
if (self.general['begin'] or self.general['end']):
(self.general['begin'], self.general['end'], self.general['firstRun'], self.general['lastRun']) = self.dataset.convertTimeToRun(firstRun=self.general['firstRun'], lastRun=self.general['lastRun'], begin=self.general['begin'], end=self.general['end'], shortTuple=False)
if (self.general['begin'] == None):
self.general['begin'] = ''
if (self.general['end'] == None):
self.general['end'] = ''
self.general['firstRun'] = str(self.general['firstRun'])
self.general['lastRun'] = str(self.general['lastRun'])
if ((not self.general['firstRun']) and (self.general['end'] or self.general['lastRun'])):
self.general['firstRun'] = str(self.dataset.runList()[0]['run_number'])
if ((not self.general['lastRun']) and (self.general['begin'] or self.general['firstRun'])):
self.general['lastRun'] = str(self.dataset.runList()[(- 1)]['run_number'])
if (self.general['firstRun'] and self.general['lastRun']):
if (int(self.general['firstRun']) > int(self.general['lastRun'])):
msg = "The lower time/runrange limit ('begin'/'firstRun') chosen is greater than the upper time/runrange limit ('end'/'lastRun')."
raise AllInOneError(msg)
self.general['runRange'] = ((self.general['firstRun'] + '-') + self.general['lastRun'])
try:
self.general['datasetDefinition'] = self.dataset.datasetSnippet(jsonPath=self.general['JSON'], firstRun=self.general['firstRun'], lastRun=self.general['lastRun'], begin=self.general['begin'], end=self.general['end'], crab=True)
except AllInOneError as e:
msg = ('In section [%s:%s]: ' % (self.valType, self.name))
msg += str(e)
raise AllInOneError(msg)
self.general['usepixelqualityflag'] = pythonboolstring(self.general['usepixelqualityflag'], 'usepixelqualityflag') | -8,920,607,103,725,009,000 | This method adds additional items to the `self.general` dictionary
which are only needed for validations using datasets.
Arguments:
- `valName`: String which identifies individual validation instances
- `alignment`: `Alignment` instance to validate
- `config`: `BetterConfigParser` instance which includes the
configuration of the validations | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | __init__ | 4quarks/cmssw | python | def __init__(self, valName, alignment, config):
'\n This method adds additional items to the `self.general` dictionary\n which are only needed for validations using datasets.\n \n Arguments:\n - `valName`: String which identifies individual validation instances\n - `alignment`: `Alignment` instance to validate\n - `config`: `BetterConfigParser` instance which includes the\n configuration of the validations\n '
super(GenericValidationData, self).__init__(valName, alignment, config)
if ((int(self.general['maxevents']) < 0) and (self.NJobs > 1)):
msg = 'Maximum number of events (maxevents) not specified: cannot use parallel jobs.'
raise AllInOneError(msg)
if ((int(self.general['maxevents']) / self.NJobs) != (float(self.general['maxevents']) / self.NJobs)):
msg = 'maxevents has to be divisible by parallelJobs'
raise AllInOneError(msg)
tryPredefinedFirst = ((not (self.jobmode.split(',')[0] == 'crab')) and (self.general['JSON'] == '') and (self.general['firstRun'] == '') and (self.general['lastRun'] == '') and (self.general['begin'] == '') and (self.general['end'] == ''))
if (self.general['dataset'] not in globalDictionaries.usedDatasets):
globalDictionaries.usedDatasets[self.general['dataset']] = {}
if (self.cmssw not in globalDictionaries.usedDatasets[self.general['dataset']]):
if (globalDictionaries.usedDatasets[self.general['dataset']] != {}):
print(("Warning: you use the same dataset '%s' in multiple cmssw releases.\nThis is allowed, but make sure it's not a mistake" % self.general['dataset']))
globalDictionaries.usedDatasets[self.general['dataset']][self.cmssw] = {False: None, True: None}
Bfield = self.general.get('magneticfield', None)
if (globalDictionaries.usedDatasets[self.general['dataset']][self.cmssw][tryPredefinedFirst] is None):
dataset = Dataset(self.general['dataset'], tryPredefinedFirst=tryPredefinedFirst, cmssw=self.cmssw, cmsswrelease=self.cmsswreleasebase, magneticfield=Bfield, dasinstance=self.general['dasinstance'])
globalDictionaries.usedDatasets[self.general['dataset']][self.cmssw][tryPredefinedFirst] = dataset
if (tryPredefinedFirst and (not dataset.predefined())):
globalDictionaries.usedDatasets[self.general['dataset']][self.cmssw][False] = dataset
self.dataset = globalDictionaries.usedDatasets[self.general['dataset']][self.cmssw][tryPredefinedFirst]
self.general['magneticField'] = self.dataset.magneticField()
self.general['defaultMagneticField'] = 'MagneticField'
if (self.general['magneticField'] == 'unknown'):
print('Could not get the magnetic field for this dataset.')
print('Using the default: ', self.general['defaultMagneticField'])
self.general['magneticField'] = '.oO[defaultMagneticField]Oo.'
if (not (self.jobmode.split(',')[0] == 'crab')):
try:
self.general['datasetDefinition'] = self.dataset.datasetSnippet(jsonPath=self.general['JSON'], firstRun=self.general['firstRun'], lastRun=self.general['lastRun'], begin=self.general['begin'], end=self.general['end'], parent=self.needParentFiles)
except AllInOneError as e:
msg = ('In section [%s:%s]: ' % (self.valType, self.name))
msg += str(e)
raise AllInOneError(msg)
else:
if self.dataset.predefined():
msg = ("For jobmode 'crab' you cannot use predefined datasets (in your case: '%s')." % self.dataset.name())
raise AllInOneError(msg)
try:
theUpdate = config.getResultingSection(((self.valType + ':') + self.name), demandPars=['parallelJobs'])
except AllInOneError as e:
msg = (str(e)[:(- 1)] + " when using 'jobmode: crab'.")
raise AllInOneError(msg)
self.general.update(theUpdate)
if (self.general['begin'] or self.general['end']):
(self.general['begin'], self.general['end'], self.general['firstRun'], self.general['lastRun']) = self.dataset.convertTimeToRun(firstRun=self.general['firstRun'], lastRun=self.general['lastRun'], begin=self.general['begin'], end=self.general['end'], shortTuple=False)
if (self.general['begin'] == None):
self.general['begin'] = ''
if (self.general['end'] == None):
self.general['end'] = ''
self.general['firstRun'] = str(self.general['firstRun'])
self.general['lastRun'] = str(self.general['lastRun'])
if ((not self.general['firstRun']) and (self.general['end'] or self.general['lastRun'])):
self.general['firstRun'] = str(self.dataset.runList()[0]['run_number'])
if ((not self.general['lastRun']) and (self.general['begin'] or self.general['firstRun'])):
self.general['lastRun'] = str(self.dataset.runList()[(- 1)]['run_number'])
if (self.general['firstRun'] and self.general['lastRun']):
if (int(self.general['firstRun']) > int(self.general['lastRun'])):
msg = "The lower time/runrange limit ('begin'/'firstRun') chosen is greater than the upper time/runrange limit ('end'/'lastRun')."
raise AllInOneError(msg)
self.general['runRange'] = ((self.general['firstRun'] + '-') + self.general['lastRun'])
try:
self.general['datasetDefinition'] = self.dataset.datasetSnippet(jsonPath=self.general['JSON'], firstRun=self.general['firstRun'], lastRun=self.general['lastRun'], begin=self.general['begin'], end=self.general['end'], crab=True)
except AllInOneError as e:
msg = ('In section [%s:%s]: ' % (self.valType, self.name))
msg += str(e)
raise AllInOneError(msg)
self.general['usepixelqualityflag'] = pythonboolstring(self.general['usepixelqualityflag'], 'usepixelqualityflag') |
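A simplified sketch of the run-range bookkeeping at the end of this constructor: missing endpoints are filled from the dataset's run list, inverted ranges are rejected, and the result is the 'firstRun-lastRun' string stored for the repMap. The helper below is illustrative, not the class's actual code path.
def build_run_range(first_run, last_run, dataset_runs):
    if not first_run and last_run:
        first_run = dataset_runs[0]       # earliest run in the dataset
    if not last_run and first_run:
        last_run = dataset_runs[-1]       # latest run in the dataset
    if first_run and last_run and int(first_run) > int(last_run):
        raise ValueError('lower run-range limit is greater than the upper limit')
    return '%s-%s' % (first_run, last_run)

print(build_run_range('', 278808, [273158, 274094, 278808]))   # -> '273158-278808'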
def createCrabCfg(self, path, crabCfgBaseName):
'\n Method which creates a `crab.cfg` for a validation on datasets.\n \n Arguments:\n - `path`: Path at which the file will be stored.\n - `crabCfgBaseName`: String which depends on the actual type of\n validation calling this method.\n '
crabCfgName = ('crab.%s.%s.%s.cfg' % (crabCfgBaseName, self.name, self.alignmentToValidate.name))
repMap = self.getRepMap()
repMap['script'] = 'dummy_script.sh'
repMap['crabWorkingDir'] = crabCfgName.split('.cfg')[0]
self.crabWorkingDir = repMap['crabWorkingDir']
repMap['numberOfJobs'] = self.general['parallelJobs']
repMap['cfgFile'] = self.configFiles[0]
repMap['queue'] = self.jobmode.split(',')[1].split('-q')[1]
if (self.dataset.dataType() == 'mc'):
repMap['McOrData'] = 'events = .oO[nEvents]Oo.'
elif (self.dataset.dataType() == 'data'):
repMap['McOrData'] = 'lumis = -1'
if (self.jobmode.split(',')[0] == 'crab'):
print("For jobmode 'crab' the parameter 'maxevents' will be ignored and all events will be processed.")
else:
raise AllInOneError("Unknown data type! Can't run in crab mode")
crabCfg = {crabCfgName: replaceByMap(configTemplates.crabCfgTemplate, repMap)}
return super(GenericValidationData, self).createCrabCfg(crabCfg, path) | -6,388,444,098,593,256,000 | Method which creates a `crab.cfg` for a validation on datasets.
Arguments:
- `path`: Path at which the file will be stored.
- `crabCfgBaseName`: String which depends on the actual type of
validation calling this method. | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | createCrabCfg | 4quarks/cmssw | python | def createCrabCfg(self, path, crabCfgBaseName):
'\n Method which creates a `crab.cfg` for a validation on datasets.\n \n Arguments:\n - `path`: Path at which the file will be stored.\n - `crabCfgBaseName`: String which depends on the actual type of\n validation calling this method.\n '
crabCfgName = ('crab.%s.%s.%s.cfg' % (crabCfgBaseName, self.name, self.alignmentToValidate.name))
repMap = self.getRepMap()
repMap['script'] = 'dummy_script.sh'
repMap['crabWorkingDir'] = crabCfgName.split('.cfg')[0]
self.crabWorkingDir = repMap['crabWorkingDir']
repMap['numberOfJobs'] = self.general['parallelJobs']
repMap['cfgFile'] = self.configFiles[0]
repMap['queue'] = self.jobmode.split(',')[1].split('-q')[1]
if (self.dataset.dataType() == 'mc'):
repMap['McOrData'] = 'events = .oO[nEvents]Oo.'
elif (self.dataset.dataType() == 'data'):
repMap['McOrData'] = 'lumis = -1'
if (self.jobmode.split(',')[0] == 'crab'):
print("For jobmode 'crab' the parameter 'maxevents' will be ignored and all events will be processed.")
else:
raise AllInOneError("Unknown data type! Can't run in crab mode")
crabCfg = {crabCfgName: replaceByMap(configTemplates.crabCfgTemplate, repMap)}
return super(GenericValidationData, self).createCrabCfg(crabCfg, path) |
@abstractmethod
def plottingscriptname(cls):
'override with a classmethod' | -5,638,076,648,143,060,000 | override with a classmethod | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | plottingscriptname | 4quarks/cmssw | python | @abstractmethod
def plottingscriptname(cls):
|
@abstractmethod
def plottingscripttemplate(cls):
'override with a classmethod' | -1,264,907,108,993,485,600 | override with a classmethod | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | plottingscripttemplate | 4quarks/cmssw | python | @abstractmethod
def plottingscripttemplate(cls):
|
@abstractmethod
def plotsdirname(cls):
'override with a classmethod' | -2,828,980,906,335,749,000 | override with a classmethod | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | plotsdirname | 4quarks/cmssw | python | @abstractmethod
def plotsdirname(cls):
|
@abstractmethod
def getsummaryitems(cls, folder):
'override with a classmethod that returns a list of SummaryItems\n based on the plots saved in folder' | 5,501,817,813,577,775,000 | override with a classmethod that returns a list of SummaryItems
based on the plots saved in folder | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | getsummaryitems | 4quarks/cmssw | python | @abstractmethod
def getsummaryitems(cls, folder):
'override with a classmethod that returns a list of SummaryItems\n based on the plots saved in folder' |
@abstractmethod
def comparealignmentsname(cls):
'classmethod' | -6,744,047,084,482,538,000 | classmethod | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | comparealignmentsname | 4quarks/cmssw | python | @abstractmethod
def comparealignmentsname(cls):
|
@abstractmethod
def presentationsubsections(cls):
'classmethod' | -1,757,039,647,671,784,700 | classmethod | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | presentationsubsections | 4quarks/cmssw | python | @abstractmethod
def presentationsubsections(cls):
|
def __init__(self, name, values, format=None, latexname=None, latexformat=None):
'\n name: name of the summary item, goes on top of the column\n values: value for each alignment (in order of rows)\n format: python format string (default: {:.3g}, meaning up to 3 significant digits)\n latexname: name in latex form, e.g. if name=sigma you might want latexname=\\sigma (default: name)\n latexformat: format for latex (default: format)\n '
if (format is None):
format = '{:.3g}'
if (latexname is None):
latexname = name
if (latexformat is None):
latexformat = format
self.__name = name
self.__values = values
self.__format = format
self.__latexname = latexname
self.__latexformat = latexformat | 9,029,562,683,415,845,000 | name: name of the summary item, goes on top of the column
values: value for each alignment (in order of rows)
format: python format string (default: {:.3g}, meaning up to 3 significant digits)
latexname: name in latex form, e.g. if name=sigma you might want latexname=\sigma (default: name)
latexformat: format for latex (default: format) | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | __init__ | 4quarks/cmssw | python | def __init__(self, name, values, format=None, latexname=None, latexformat=None):
'\n name: name of the summary item, goes on top of the column\n values: value for each alignment (in order of rows)\n format: python format string (default: {:.3g}, meaning up to 3 significant digits)\n latexname: name in latex form, e.g. if name=sigma you might want latexname=\\sigma (default: name)\n latexformat: format for latex (default: format)\n '
if (format is None):
format = '{:.3g}'
if (latexname is None):
latexname = name
if (latexformat is None):
latexformat = format
self.__name = name
self.__values = values
self.__format = format
self.__latexname = latexname
self.__latexformat = latexformat |
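Note on the SummaryItem constructor above: the default format string is '{:.3g}', i.e. up to three significant digits. A short illustrative sketch (not a dataset row; the numeric values are invented) of how that default compares with a custom latexformat:

# Illustrative only -- shows what the default '{:.3g}' produces versus a
# hypothetical caller-supplied latexformat of '{:.2e}'.
values = [0.000123456, 12.3456, 98765.4]
default_fmt = '{:.3g}'   # used when format is None
latex_fmt = '{:.2e}'     # example of a custom latexformat
for v in values:
    print(default_fmt.format(v), latex_fmt.format(v))
# prints: 0.000123 1.23e-04 / 12.3 1.23e+01 / 9.88e+04 9.88e+04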
def __init__(self):
'Constructor.\n '
self.filters = {} | -9,051,548,567,641,834,000 | Constructor. | pylith/apps/ConfigSearchApp.py | __init__ | Shengduo/pylith | python | def __init__(self):
'\n '
self.filters = {} |
def main(self, **kwargs):
'Main entry point.\n\n Keyword arguments:\n searchpath (str), default: "."\n Search path for .cfg files.\n display (str), default: "all"\n List of metadata to display in search results.\n keywords (str), default: None\n Comma delimited list of keywords for filtering search results.\n features (str), default: None\n Comma delimited list of features for filtering search results.\n authors (str), default: None\n Comma delimited list of authors for filtering search results.\n version (str), default: None\n PyLith version for filtering search results.\n '
args = (argparse.Namespace(**kwargs) if kwargs else self._parse_command_line())
self._set_filters(args)
for filename in sorted(pathlib.Path(args.searchpath).glob('**/*.cfg')):
metadata = fromFile(filename)
if metadata:
if (not len(metadata.arguments)):
if args.verbose:
print(f'INFO: Skipping file {filename} with only base metadata.')
continue
filter_fn = (self._apply_filters_incompatible if args.incompatible else self._apply_filters)
if filter_fn(metadata):
self._display_metadata(filename, metadata, args.display)
elif args.verbose:
print(f'MISMATCH: File {filename} did not pass metadata filter.')
elif args.verbose:
print(f'INFO: File {filename} missing simulation metadata.') | 1,933,451,545,898,552,600 | Main entry point.
Keyword arguments:
searchpath (str), default: "."
Search path for .cfg files.
display (str), default: "all"
List of metadata to display in search results.
keywords (str), default: None
Comma delimited list of keywords for filtering search results.
features (str), default: None
Comma delimited list of features for filtering search results.
authors (str), default: None
Comma delimited list of authors for filtering search results.
version (str), default: None
PyLith version for filtering search results. | pylith/apps/ConfigSearchApp.py | main | Shengduo/pylith | python | def main(self, **kwargs):
'Main entry point.\n\n Keyword arguments:\n searchpath (str), default: "."\n Search path for .cfg files.\n display (str), default: "all"\n List of metadata to display in search results.\n keywords (str), default: None\n Comma delimited list of keywords for filtering search results.\n features (str), default: None\n Comma delimited list of features for filtering search results.\n authors (str), default: None\n Comma delimited list of authors for filtering search results.\n version (str), default: None\n PyLith version for filtering search results.\n '
args = (argparse.Namespace(**kwargs) if kwargs else self._parse_command_line())
self._set_filters(args)
for filename in sorted(pathlib.Path(args.searchpath).glob('**/*.cfg')):
metadata = fromFile(filename)
if metadata:
if (not len(metadata.arguments)):
if args.verbose:
print(f'INFO: Skipping file {filename} with only base metadata.')
continue
filter_fn = (self._apply_filters_incompatible if args.incompatible else self._apply_filters)
if filter_fn(metadata):
self._display_metadata(filename, metadata, args.display)
elif args.verbose:
print(f'MISMATCH: File {filename} did not pass metadata filter.')
elif args.verbose:
print(f'INFO: File {filename} missing simulation metadata.') |
def _set_filters(self, options):
'Set filters for display from command line option.\n\n Args:\n options (argsparse.Namespace)\n Command line options.\n '
if options.keywords:
self.filters['keywords'] = string_to_list(options.keywords)
if options.features:
self.filters['features'] = string_to_list(options.features)
if options.authors:
self.filters['authors'] = string_to_list(options.authors)
if options.version:
self.filters['version'] = options.version | -6,979,188,438,464,539,000 | Set filters for display from command line option.
Args:
options (argsparse.Namespace)
Command line options. | pylith/apps/ConfigSearchApp.py | _set_filters | Shengduo/pylith | python | def _set_filters(self, options):
'Set filters for display from command line option.\n\n Args:\n options (argsparse.Namespace)\n Command line options.\n '
if options.keywords:
self.filters['keywords'] = string_to_list(options.keywords)
if options.features:
self.filters['features'] = string_to_list(options.features)
if options.authors:
self.filters['authors'] = string_to_list(options.authors)
if options.version:
self.filters['version'] = options.version |
def _apply_filters(self, metadata):
'Apply filters to metadata.\n\n Args:\n metadata (pylith.utils.SimulationMetadata)\n Simulation metadata.\n \n Returns: (bool)\n True if metadata meets filter requirements, False otherwise.\n '
if ('keywords' in self.filters):
if (not metadata.keywords):
return False
if (not all(((keyword in metadata.keywords) for keyword in self.filters['keywords']))):
return False
if ('features' in self.filters):
if (not metadata.features):
return False
if (not all(((feature in metadata.features) for feature in self.filters['features']))):
return False
if ('authors' in self.filters):
if (not metadata.authors):
return False
if (not all(((author in metadata.authors) for author in self.filters['authors']))):
return False
if ('version' in self.filters):
if (not metadata.pylith_version):
return False
for verMeta in metadata.pylith_version:
if (not eval('{ver} {verMeta}'.format(ver=self.filters['version'], verMeta=verMeta))):
return False
return True | 2,894,900,722,805,184,500 | Apply filters to metadata.
Args:
metadata (pylith.utils.SimulationMetadata)
Simulation metadata.
Returns: (bool)
True if metadata meets filter requirements, False otherwise. | pylith/apps/ConfigSearchApp.py | _apply_filters | Shengduo/pylith | python | def _apply_filters(self, metadata):
'Apply filters to metadata.\n\n Args:\n metadata (pylith.utils.SimulationMetadata)\n Simulation metadata.\n \n Returns: (bool)\n True if metadata meets filter requirements, False otherwise.\n '
if ('keywords' in self.filters):
if (not metadata.keywords):
return False
if (not all(((keyword in metadata.keywords) for keyword in self.filters['keywords']))):
return False
if ('features' in self.filters):
if (not metadata.features):
return False
if (not all(((feature in metadata.features) for feature in self.filters['features']))):
return False
if ('authors' in self.filters):
if (not metadata.authors):
return False
if (not all(((author in metadata.authors) for author in self.filters['authors']))):
return False
if ('version' in self.filters):
if (not metadata.pylith_version):
return False
for verMeta in metadata.pylith_version:
if (not eval('{ver} {verMeta}'.format(ver=self.filters['version'], verMeta=verMeta))):
return False
return True |
def _apply_filters_incompatible(self, metadata):
'Apply filters to metadata to find incompatible parameter files.\n\n Args:\n metadata (pylith.utils.SimulationMetadata)\n Simulation metadata.\n \n Returns: (bool)\n True if metadata is incompatible with filter requirements, False otherwise.\n '
if ('keywords' in self.filters):
if (not metadata.keywords):
return True
if ('features' in self.filters):
if (not ('features' in metadata)):
return True
if ('authors' in self.filters):
if (not ('authors' in metadata)):
return True
if ('version' in self.filters):
if (not metadata.pylith_version):
return True
for verMeta in metadata.pylith_version:
if (not eval('{ver} {verMeta}'.format(ver=self.filters['version'], verMeta=verMeta))):
return True
return False | -2,360,490,715,105,342,000 | Apply filters to metadata to find incompatible parameter files.
Args:
metadata (pylith.utils.SimulationMetadata)
Simulation metadata.
Returns: (bool)
True if metadata is incompatible with filter requirements, False otherwise. | pylith/apps/ConfigSearchApp.py | _apply_filters_incompatible | Shengduo/pylith | python | def _apply_filters_incompatible(self, metadata):
'Apply filters to metadata to find incompatible parameter files.\n\n Args:\n metadata (pylith.utils.SimulationMetadata)\n Simulation metadata.\n \n Returns: (bool)\n True if metadata is incompatible with filter requirements, False otherwise.\n '
if ('keywords' in self.filters):
if (not metadata.keywords):
return True
if ('features' in self.filters):
if (not ('features' in metadata)):
return True
if ('authors' in self.filters):
if (not ('authors' in metadata)):
return True
if ('version' in self.filters):
if (not metadata.pylith_version):
return True
for verMeta in metadata.pylith_version:
if (not eval('{ver} {verMeta}'.format(ver=self.filters['version'], verMeta=verMeta))):
return True
return False |
def _display_metadata(self, filename, metadata, options):
'Print metadata to stdout.\n\n Args:\n filename (str)\n Name of simulation .cfg file.\n metadata (pylith.utils.SimulationMetadata)\n Simulation metadata.\n options (list of str)\n List of metadata to display.\n '
INDENT = (' ' * 4)
show_all = ('all' in options)
options = string_to_list(options)
line0 = f'{filename}'
if (('version' in options) or show_all):
if metadata.version:
line0 += f' v{metadata.version}'
else:
line0 += " missing 'version'"
if (('pylith_version' in options) or show_all):
if metadata.pylith_version:
line0 += ('; requires PyLith ' + ' and '.join(metadata.pylith_version))
else:
line0 += "; missing 'pylith_version'"
lines = []
if (('description' in options) or show_all):
if metadata.description:
lines += [metadata.description]
else:
lines += ["missing 'description'"]
if (('authors' in options) or show_all):
if metadata.authors:
lines += [('Authors: ' + ', '.join(metadata.authors))]
else:
lines += ["missing 'authors'"]
if (('keywords' in options) or show_all):
if metadata.keywords:
lines += [('Keywords: ' + ', '.join(metadata.keywords))]
else:
lines += ["missing 'keywords'"]
if (('features' in options) or show_all):
if metadata.features:
features = textwrap.fill(', '.join(metadata.features), width=120)
lines += (['Features:'] + textwrap.indent(features, INDENT).split('\n'))
else:
lines += ["missing 'features'"]
if (('arguments' in options) or show_all):
if metadata.arguments:
lines += [('pylith ' + ' '.join(metadata.arguments))]
else:
lines += ["missing 'arguments'"]
print(line0)
if len(lines):
print(textwrap.indent('\n'.join(lines), INDENT)) | -8,794,836,628,804,046,000 | Print metadata to stdout.
Args:
filename (str)
Name of simulation .cfg file.
metadata (pylith.utils.SimulationMetadata)
Simulation metadata.
options (list of str)
List of metadata to display. | pylith/apps/ConfigSearchApp.py | _display_metadata | Shengduo/pylith | python | def _display_metadata(self, filename, metadata, options):
'Print metadata to stdout.\n\n Args:\n filename (str)\n Name of simulation .cfg file.\n metadata (pylith.utils.SimulationMetadata)\n Simulation metadata.\n options (list of str)\n List of metadata to display.\n '
INDENT = (' ' * 4)
show_all = ('all' in options)
options = string_to_list(options)
line0 = f'{filename}'
if (('version' in options) or show_all):
if metadata.version:
line0 += f' v{metadata.version}'
else:
line0 += " missing 'version'"
if (('pylith_version' in options) or show_all):
if metadata.pylith_version:
line0 += ('; requires PyLith ' + ' and '.join(metadata.pylith_version))
else:
line0 += "; missing 'pylith_version'"
lines = []
if (('description' in options) or show_all):
if metadata.description:
lines += [metadata.description]
else:
lines += ["missing 'description'"]
if (('authors' in options) or show_all):
if metadata.authors:
lines += [('Authors: ' + ', '.join(metadata.authors))]
else:
lines += ["missing 'authors'"]
if (('keywords' in options) or show_all):
if metadata.keywords:
lines += [('Keywords: ' + ', '.join(metadata.keywords))]
else:
lines += ["missing 'keywords'"]
if (('features' in options) or show_all):
if metadata.features:
features = textwrap.fill(', '.join(metadata.features), width=120)
lines += (['Features:'] + textwrap.indent(features, INDENT).split('\n'))
else:
lines += ["missing 'features'"]
if (('arguments' in options) or show_all):
if metadata.arguments:
lines += [('pylith ' + ' '.join(metadata.arguments))]
else:
lines += ["missing 'arguments'"]
print(line0)
if len(lines):
print(textwrap.indent('\n'.join(lines), INDENT)) |
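For reference, the wrapping done in _display_metadata above relies only on the standard library; a small standalone sketch (the feature names are made up) of the textwrap.fill/textwrap.indent combination it uses:

# Illustrative only -- mirrors the Features rendering in _display_metadata.
import textwrap

INDENT = ' ' * 4
features = ['triangular cells', 'quasistatic', 'field split preconditioner']  # invented
wrapped = textwrap.fill(', '.join(features), width=40)
print('Features:')
print(textwrap.indent(wrapped, INDENT))
# Features:
#     triangular cells, quasistatic, field
#     split preconditioner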
def _parse_command_line(self):
'Parse command line arguments.\n\n Returns (argsparse.Namespace)\n Command line arguments.\n '
DESCRIPTION = 'Application for searching PyLith .cfg parameter files.'
parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--path', action='store', dest='searchpath', default='.', help='Search path for .cfg files.')
parser.add_argument('--display', action='store', dest='display', default='all', help='List of metadata to display in search results.')
parser.add_argument('--verbose', action='store_true', dest='verbose', help='Report missing metadata.')
parser.add_argument('--keywords', action='store', dest='keywords', help='Comma delimited list of keywords for filtering search results.')
parser.add_argument('--features', action='store', dest='features', help='Comma delimited list of features for filtering search results.')
parser.add_argument('--authors', action='store', dest='authors', help='Comma delimited list of authors for filtering search results.')
parser.add_argument('--version', action='store', dest='version', help='PyLith version for filtering search results.')
parser.add_argument('--incompatible', action='store_true', dest='incompatible', help='Filter search results to show incompatible parameter files.')
args = parser.parse_args()
return args | -7,304,533,193,928,211,000 | Parse command line arguments.
Returns (argsparse.Namespace)
Command line arguments. | pylith/apps/ConfigSearchApp.py | _parse_command_line | Shengduo/pylith | python | def _parse_command_line(self):
'Parse command line arguments.\n\n Returns (argsparse.Namespace)\n Command line arguments.\n '
DESCRIPTION = 'Application for searching PyLith .cfg parameter files.'
parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--path', action='store', dest='searchpath', default='.', help='Search path for .cfg files.')
parser.add_argument('--display', action='store', dest='display', default='all', help='List of metadata to display in search results.')
parser.add_argument('--verbose', action='store_true', dest='verbose', help='Report missing metadata.')
parser.add_argument('--keywords', action='store', dest='keywords', help='Comma delimited list of keywords for filtering search results.')
parser.add_argument('--features', action='store', dest='features', help='Comma delimited list of features for filtering search results.')
parser.add_argument('--authors', action='store', dest='authors', help='Comma delimited list of authors for filtering search results.')
parser.add_argument('--version', action='store', dest='version', help='PyLith version for filtering search results.')
parser.add_argument('--incompatible', action='store_true', dest='incompatible', help='Filter search results to show incompatible parameter files.')
args = parser.parse_args()
return args |
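Putting the ConfigSearchApp rows above together: main() either parses the command line or builds an argparse.Namespace straight from its keyword arguments, and the filter/display code then reads attributes such as verbose and incompatible from that namespace. A hedged usage sketch follows (the class and module names are taken from the row metadata; the search path and keyword are invented, and every attribute the code reads must be passed explicitly):

# Illustrative only -- assumes the class in pylith/apps/ConfigSearchApp.py is
# named ConfigSearchApp and that pylith is importable.
from pylith.apps.ConfigSearchApp import ConfigSearchApp

app = ConfigSearchApp()
app.main(
    searchpath='examples',          # invented directory to scan for *.cfg files
    display='description,authors',  # subset of metadata to print
    keywords='quasistatic',         # invented keyword filter
    features=None,
    authors=None,
    version=None,
    verbose=False,                  # read as args.verbose inside main()
    incompatible=False,             # False selects _apply_filters, not the incompatible variant
)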
def list_user_usage(self, **kwargs):
'\n A list of usage user entries\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.list_user_usage(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A list of names.\n :param str filter: The filter to be used for query.\n :param int limit: limit, should be >= 0\n :param str sort: The way to order the results.\n :param int start: start\n :param str token: token\n :param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.\n :param list[str] uids: A comma-separated list of user IDs. If after filtering, there is not at least one resource that matches each of the elements of user IDs, then an error is returned. This cannot be provided together with user_names query parameter.\n :return: QuotasUserResponse\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_user_usage_with_http_info(**kwargs)
else:
data = self.list_user_usage_with_http_info(**kwargs)
return data | 505,787,424,465,485,060 | A list of usage user entries
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_user_usage(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A list of names.
:param str filter: The filter to be used for query.
:param int limit: limit, should be >= 0
:param str sort: The way to order the results.
:param int start: start
:param str token: token
:param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
:param list[str] uids: A comma-separated list of user IDs. If after filtering, there is not at least one resource that matches each of the elements of user IDs, then an error is returned. This cannot be provided together with user_names query parameter.
:return: QuotasUserResponse
If the method is called asynchronously,
returns the request thread. | purity_fb/purity_fb_1dot6/apis/usage_users_api.py | list_user_usage | unixtreme/purity_fb_python_client | python | def list_user_usage(self, **kwargs):
'\n A list of usage user entries\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.list_user_usage(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A list of names.\n :param str filter: The filter to be used for query.\n :param int limit: limit, should be >= 0\n :param str sort: The way to order the results.\n :param int start: start\n :param str token: token\n :param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.\n :param list[str] uids: A comma-separated list of user IDs. If after filtering, there is not at least one resource that matches each of the elements of user IDs, then an error is returned. This cannot be provided together with user_names query parameter.\n :return: QuotasUserResponse\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_user_usage_with_http_info(**kwargs)
else:
data = self.list_user_usage_with_http_info(**kwargs)
return data |
def list_user_usage_with_http_info(self, **kwargs):
'\n A list of usage user entries\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.list_user_usage_with_http_info(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A list of names.\n :param str filter: The filter to be used for query.\n :param int limit: limit, should be >= 0\n :param str sort: The way to order the results.\n :param int start: start\n :param str token: token\n :param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.\n :param list[str] uids: A comma-separated list of user IDs. If after filtering, there is not at least one resource that matches each of the elements of user IDs, then an error is returned. This cannot be provided together with user_names query parameter.\n :return: QuotasUserResponse\n If the method is called asynchronously,\n returns the request thread.\n '
all_params = ['names', 'filter', 'limit', 'sort', 'start', 'token', 'file_system_names', 'uids']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for (key, val) in iteritems(params['kwargs']):
if (key not in all_params):
raise TypeError(("Got an unexpected keyword argument '%s' to method list_user_usage" % key))
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if ('names' in params):
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if ('filter' in params):
query_params.append(('filter', params['filter']))
if ('limit' in params):
query_params.append(('limit', params['limit']))
if ('sort' in params):
query_params.append(('sort', params['sort']))
if ('start' in params):
query_params.append(('start', params['start']))
if ('token' in params):
query_params.append(('token', params['token']))
if ('file_system_names' in params):
query_params.append(('file_system_names', params['file_system_names']))
collection_formats['file_system_names'] = 'csv'
if ('uids' in params):
query_params.append(('uids', params['uids']))
collection_formats['uids'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.6/usage/users', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='QuotasUserResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) | 5,569,943,251,025,442,000 | A list of usage user entries
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_user_usage_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A list of names.
:param str filter: The filter to be used for query.
:param int limit: limit, should be >= 0
:param str sort: The way to order the results.
:param int start: start
:param str token: token
:param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
:param list[str] uids: A comma-separated list of user IDs. If after filtering, there is not at least one resource that matches each of the elements of user IDs, then an error is returned. This cannot be provided together with user_names query parameter.
:return: QuotasUserResponse
If the method is called asynchronously,
returns the request thread. | purity_fb/purity_fb_1dot6/apis/usage_users_api.py | list_user_usage_with_http_info | unixtreme/purity_fb_python_client | python | def list_user_usage_with_http_info(self, **kwargs):
'\n A list of usage user entries\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.list_user_usage_with_http_info(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A list of names.\n :param str filter: The filter to be used for query.\n :param int limit: limit, should be >= 0\n :param str sort: The way to order the results.\n :param int start: start\n :param str token: token\n :param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.\n :param list[str] uids: A comma-separated list of user IDs. If after filtering, there is not at least one resource that matches each of the elements of user IDs, then an error is returned. This cannot be provided together with user_names query parameter.\n :return: QuotasUserResponse\n If the method is called asynchronously,\n returns the request thread.\n '
all_params = ['names', 'filter', 'limit', 'sort', 'start', 'token', 'file_system_names', 'uids']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for (key, val) in iteritems(params['kwargs']):
if (key not in all_params):
raise TypeError(("Got an unexpected keyword argument '%s' to method list_user_usage" % key))
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if ('names' in params):
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if ('filter' in params):
query_params.append(('filter', params['filter']))
if ('limit' in params):
query_params.append(('limit', params['limit']))
if ('sort' in params):
query_params.append(('sort', params['sort']))
if ('start' in params):
query_params.append(('start', params['start']))
if ('token' in params):
query_params.append(('token', params['token']))
if ('file_system_names' in params):
query_params.append(('file_system_names', params['file_system_names']))
collection_formats['file_system_names'] = 'csv'
if ('uids' in params):
query_params.append(('uids', params['uids']))
collection_formats['uids'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.6/usage/users', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='QuotasUserResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) |
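The two usage_users rows above are the generated REST wrappers for /1.6/usage/users. A hedged client-side sketch follows; the array address, API token, and file system name are placeholders, and the usage_users attribute on the PurityFb client is an assumption inferred from the module path rather than something stated in the rows:

# Illustrative only -- placeholder host, token, and file system name.
from purity_fb import PurityFb

fb = PurityFb('flashblade.example.com')      # hypothetical array address
fb.disable_verify_ssl()                      # skip TLS verification for a lab box
fb.login('T-00000000-0000-0000-0000-000000000000')  # placeholder API token

# List per-user usage for one file system, 10 entries at a time.
res = fb.usage_users.list_user_usage(file_system_names=['fs1'], limit=10)
print(res)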
def run_and_check_derivs(self, prob, tol=1e-05, verbose=False):
'Runs check_partials and compares to analytic derivatives.'
prob.run_model()
derivs = prob.check_partials(out_stream=None)
for i in derivs['comp'].keys():
if verbose:
print('Checking derivative pair:', i)
if (derivs['comp'][i]['J_fwd'].sum() != 0.0):
rel_err = derivs['comp'][i]['rel error'][0]
self.assertLessEqual(rel_err, tol) | 9,059,856,035,833,121,000 | Runs check_partials and compares to analytic derivatives. | openmdao/components/tests/test_meta_model_structured_comp.py | run_and_check_derivs | JustinSGray/OpenMDAO | python | def run_and_check_derivs(self, prob, tol=1e-05, verbose=False):
prob.run_model()
derivs = prob.check_partials(out_stream=None)
for i in derivs['comp'].keys():
if verbose:
print('Checking derivative pair:', i)
if (derivs['comp'][i]['J_fwd'].sum() != 0.0):
rel_err = derivs['comp'][i]['rel error'][0]
self.assertLessEqual(rel_err, tol) |
def run_and_check_derivs(self, prob, tol=1e-05, verbose=False):
'Runs check_partials and compares to analytic derivatives.'
prob.run_model()
derivs = prob.check_partials(method='cs', out_stream=None)
for i in derivs['comp'].keys():
if verbose:
print('Checking derivative pair:', i)
if (derivs['comp'][i]['J_fwd'].sum() != 0.0):
rel_err = derivs['comp'][i]['rel error'][0]
self.assertLessEqual(rel_err, tol) | 3,383,656,209,822,107,600 | Runs check_partials and compares to analytic derivatives. | openmdao/components/tests/test_meta_model_structured_comp.py | run_and_check_derivs | JustinSGray/OpenMDAO | python | def run_and_check_derivs(self, prob, tol=1e-05, verbose=False):
prob.run_model()
derivs = prob.check_partials(method='cs', out_stream=None)
for i in derivs['comp'].keys():
if verbose:
print('Checking derivative pair:', i)
if (derivs['comp'][i]['J_fwd'].sum() != 0.0):
rel_err = derivs['comp'][i]['rel error'][0]
self.assertLessEqual(rel_err, tol) |
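Both run_and_check_derivs helpers above index the dictionary returned by prob.check_partials(). A minimal self-contained OpenMDAO sketch (not from the dataset; component and variable names are invented) showing that structure:

# Illustrative only -- a tiny component whose analytic partials are compared
# against complex-step, mirroring how the test helpers read the result dict.
import openmdao.api as om

class Squarer(om.ExplicitComponent):
    def setup(self):
        self.add_input('x', val=3.0)
        self.add_output('y', val=0.0)
        self.declare_partials('y', 'x')

    def compute(self, inputs, outputs):
        outputs['y'] = inputs['x'] ** 2

    def compute_partials(self, inputs, partials):
        partials['y', 'x'] = 2.0 * inputs['x']

prob = om.Problem()
prob.model.add_subsystem('comp', Squarer())
prob.setup(force_alloc_complex=True)
prob.run_model()

derivs = prob.check_partials(method='cs', out_stream=None)
for key, data in derivs['comp'].items():      # key is an (of, wrt) pair such as ('y', 'x')
    print(key, data['rel error'][0])          # same fields the helpers assert on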
@property
def keys(self):
'Names of columns.\n\n A tuple of strings that indicate the names of columns.\n '
raise NotImplementedError | 7,768,638,038,528,339,000 | Names of columns.
A tuple of strings that indicate the names of columns. | pytorch_pfn_extras/dataset/tabular/tabular_dataset.py | keys | HiroakiMikami/pytorch-pfn-extras | python | @property
def keys(self):
'Names of columns.\n\n A tuple of strings that indicate the names of columns.\n '
raise NotImplementedError |