repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (stringclasses, 1 value)
---|---|---|---|---|---|---|
FATE | FATE-master/python/fate_arch/protobuf/python/model_service_pb2.py |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: model_service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13model_service.proto\x12&com.webank.ai.fate.api.mlmodel.manager\"\x18\n\x05Party\x12\x0f\n\x07partyId\x18\x01 \x03(\t\"*\n\tLocalInfo\x12\x0c\n\x04role\x18\x01 \x01(\t\x12\x0f\n\x07partyId\x18\x02 \x01(\t\"1\n\tModelInfo\x12\x11\n\ttableName\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\"\xd9\x01\n\rRoleModelInfo\x12_\n\rroleModelInfo\x18\x01 \x03(\x0b\x32H.com.webank.ai.fate.api.mlmodel.manager.RoleModelInfo.RoleModelInfoEntry\x1ag\n\x12RoleModelInfoEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12@\n\x05value\x18\x02 \x01(\x0b\x32\x31.com.webank.ai.fate.api.mlmodel.manager.ModelInfo:\x02\x38\x01\"5\n\rUnloadRequest\x12\x11\n\ttableName\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\"5\n\x0eUnloadResponse\x12\x12\n\nstatusCode\x18\x01 \x01(\t\x12\x0f\n\x07message\x18\x02 \x01(\t\"H\n\rUnbindRequest\x12\x11\n\tserviceId\x18\x01 \x01(\t\x12\x11\n\ttableName\x18\x02 \x01(\t\x12\x11\n\tnamespace\x18\x03 \x01(\t\"5\n\x0eUnbindResponse\x12\x12\n\nstatusCode\x18\x01 \x01(\t\x12\x0f\n\x07message\x18\x02 \x01(\t\"\x85\x01\n\x11QueryModelRequest\x12\x11\n\tserviceId\x18\x01 \x01(\t\x12\x11\n\ttableName\x18\x02 \x01(\t\x12\x11\n\tnamespace\x18\x03 \x01(\t\x12\x12\n\nbeginIndex\x18\x04 \x01(\x05\x12\x10\n\x08\x65ndIndex\x18\x05 \x01(\x05\x12\x11\n\tqueryType\x18\x06 \x01(\x05\"\x0f\n\rModelBindInfo\"f\n\x0bModelInfoEx\x12\x11\n\ttableName\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x11\n\tserviceId\x18\x03 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x04 \x01(\t\x12\r\n\x05index\x18\x05 \x01(\x05\"\x7f\n\x12QueryModelResponse\x12\x0f\n\x07retcode\x18\x01 \x01(\t\x12\x0f\n\x07message\x18\x02 \x01(\t\x12G\n\nmodelInfos\x18\x03 \x03(\x0b\x32\x33.com.webank.ai.fate.api.mlmodel.manager.ModelInfoEx\"\x92\x04\n\x0ePublishRequest\x12@\n\x05local\x18\x01 \x01(\x0b\x32\x31.com.webank.ai.fate.api.mlmodel.manager.LocalInfo\x12N\n\x04role\x18\x02 \x03(\x0b\x32@.com.webank.ai.fate.api.mlmodel.manager.PublishRequest.RoleEntry\x12P\n\x05model\x18\x03 \x03(\x0b\x32\x41.com.webank.ai.fate.api.mlmodel.manager.PublishRequest.ModelEntry\x12\x11\n\tserviceId\x18\x04 \x01(\t\x12\x11\n\ttableName\x18\x05 \x01(\t\x12\x11\n\tnamespace\x18\x06 \x01(\t\x12\x10\n\x08loadType\x18\x07 \x01(\t\x12\x10\n\x08\x66ilePath\x18\x08 \x01(\t\x1aZ\n\tRoleEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12<\n\x05value\x18\x02 \x01(\x0b\x32-.com.webank.ai.fate.api.mlmodel.manager.Party:\x02\x38\x01\x1a\x63\n\nModelEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x44\n\x05value\x18\x02 \x01(\x0b\x32\x35.com.webank.ai.fate.api.mlmodel.manager.RoleModelInfo:\x02\x38\x01\"S\n\x0fPublishResponse\x12\x12\n\nstatusCode\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x32\x89\x06\n\x0cModelService\x12~\n\x0bpublishLoad\x12\x36.com.webank.ai.fate.api.mlmodel.manager.PublishRequest\x1a\x37.com.webank.ai.fate.api.mlmodel.manager.PublishResponse\x12~\n\x0bpublishBind\x12\x36.com.webank.ai.fate.api.mlmodel.manager.PublishRequest\x1a\x37.com.webank.ai.fate.api.mlmodel.manager.PublishResponse\x12\x80\x01\n\rpublishOnline\x12\x36.com.webank.ai.fate.api.mlmodel.manager.PublishRequest\x1a\x37.com.webank.ai.fate.api.mlmodel.manager.PublishResponse\x12\x83\x01\n\nqueryModel\x12\x39.com.webank.ai.fate.api.mlmodel.manager.QueryModelRequest\x1a:.com.webank.ai.fate.api.mlmodel.manager.QueryModelResponse\x12w\n\x06unload\x12\x35.com.webank.ai.fate.api.mlmodel.manager.UnloadRequest\x1a\x36.com.webank.ai.fate.api.mlmodel.manager.UnloadResponse\x12w\n\x06unbind\x12\x35.com.webank.ai.fate.api.mlmodel.manager.UnbindRequest\x1a\x36.com.webank.ai.fate.api.mlmodel.manager.UnbindResponseB\x13\x42\x11ModelServiceProtob\x06proto3')
_PARTY = DESCRIPTOR.message_types_by_name['Party']
_LOCALINFO = DESCRIPTOR.message_types_by_name['LocalInfo']
_MODELINFO = DESCRIPTOR.message_types_by_name['ModelInfo']
_ROLEMODELINFO = DESCRIPTOR.message_types_by_name['RoleModelInfo']
_ROLEMODELINFO_ROLEMODELINFOENTRY = _ROLEMODELINFO.nested_types_by_name['RoleModelInfoEntry']
_UNLOADREQUEST = DESCRIPTOR.message_types_by_name['UnloadRequest']
_UNLOADRESPONSE = DESCRIPTOR.message_types_by_name['UnloadResponse']
_UNBINDREQUEST = DESCRIPTOR.message_types_by_name['UnbindRequest']
_UNBINDRESPONSE = DESCRIPTOR.message_types_by_name['UnbindResponse']
_QUERYMODELREQUEST = DESCRIPTOR.message_types_by_name['QueryModelRequest']
_MODELBINDINFO = DESCRIPTOR.message_types_by_name['ModelBindInfo']
_MODELINFOEX = DESCRIPTOR.message_types_by_name['ModelInfoEx']
_QUERYMODELRESPONSE = DESCRIPTOR.message_types_by_name['QueryModelResponse']
_PUBLISHREQUEST = DESCRIPTOR.message_types_by_name['PublishRequest']
_PUBLISHREQUEST_ROLEENTRY = _PUBLISHREQUEST.nested_types_by_name['RoleEntry']
_PUBLISHREQUEST_MODELENTRY = _PUBLISHREQUEST.nested_types_by_name['ModelEntry']
_PUBLISHRESPONSE = DESCRIPTOR.message_types_by_name['PublishResponse']
Party = _reflection.GeneratedProtocolMessageType('Party', (_message.Message,), {
'DESCRIPTOR' : _PARTY,
'__module__' : 'model_service_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.mlmodel.manager.Party)
})
_sym_db.RegisterMessage(Party)
LocalInfo = _reflection.GeneratedProtocolMessageType('LocalInfo', (_message.Message,), {
'DESCRIPTOR' : _LOCALINFO,
'__module__' : 'model_service_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.mlmodel.manager.LocalInfo)
})
_sym_db.RegisterMessage(LocalInfo)
ModelInfo = _reflection.GeneratedProtocolMessageType('ModelInfo', (_message.Message,), {
'DESCRIPTOR' : _MODELINFO,
'__module__' : 'model_service_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.mlmodel.manager.ModelInfo)
})
_sym_db.RegisterMessage(ModelInfo)
RoleModelInfo = _reflection.GeneratedProtocolMessageType('RoleModelInfo', (_message.Message,), {
'RoleModelInfoEntry' : _reflection.GeneratedProtocolMessageType('RoleModelInfoEntry', (_message.Message,), {
'DESCRIPTOR' : _ROLEMODELINFO_ROLEMODELINFOENTRY,
'__module__' : 'model_service_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.mlmodel.manager.RoleModelInfo.RoleModelInfoEntry)
})
,
'DESCRIPTOR' : _ROLEMODELINFO,
'__module__' : 'model_service_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.mlmodel.manager.RoleModelInfo)
})
_sym_db.RegisterMessage(RoleModelInfo)
_sym_db.RegisterMessage(RoleModelInfo.RoleModelInfoEntry)
UnloadRequest = _reflection.GeneratedProtocolMessageType('UnloadRequest', (_message.Message,), {
'DESCRIPTOR' : _UNLOADREQUEST,
'__module__' : 'model_service_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.mlmodel.manager.UnloadRequest)
})
_sym_db.RegisterMessage(UnloadRequest)
UnloadResponse = _reflection.GeneratedProtocolMessageType('UnloadResponse', (_message.Message,), {
'DESCRIPTOR' : _UNLOADRESPONSE,
'__module__' : 'model_service_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.mlmodel.manager.UnloadResponse)
})
_sym_db.RegisterMessage(UnloadResponse)
UnbindRequest = _reflection.GeneratedProtocolMessageType('UnbindRequest', (_message.Message,), {
'DESCRIPTOR' : _UNBINDREQUEST,
'__module__' : 'model_service_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.mlmodel.manager.UnbindRequest)
})
_sym_db.RegisterMessage(UnbindRequest)
UnbindResponse = _reflection.GeneratedProtocolMessageType('UnbindResponse', (_message.Message,), {
'DESCRIPTOR' : _UNBINDRESPONSE,
'__module__' : 'model_service_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.mlmodel.manager.UnbindResponse)
})
_sym_db.RegisterMessage(UnbindResponse)
QueryModelRequest = _reflection.GeneratedProtocolMessageType('QueryModelRequest', (_message.Message,), {
'DESCRIPTOR' : _QUERYMODELREQUEST,
'__module__' : 'model_service_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.mlmodel.manager.QueryModelRequest)
})
_sym_db.RegisterMessage(QueryModelRequest)
ModelBindInfo = _reflection.GeneratedProtocolMessageType('ModelBindInfo', (_message.Message,), {
'DESCRIPTOR' : _MODELBINDINFO,
'__module__' : 'model_service_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.mlmodel.manager.ModelBindInfo)
})
_sym_db.RegisterMessage(ModelBindInfo)
ModelInfoEx = _reflection.GeneratedProtocolMessageType('ModelInfoEx', (_message.Message,), {
'DESCRIPTOR' : _MODELINFOEX,
'__module__' : 'model_service_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.mlmodel.manager.ModelInfoEx)
})
_sym_db.RegisterMessage(ModelInfoEx)
QueryModelResponse = _reflection.GeneratedProtocolMessageType('QueryModelResponse', (_message.Message,), {
'DESCRIPTOR' : _QUERYMODELRESPONSE,
'__module__' : 'model_service_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.mlmodel.manager.QueryModelResponse)
})
_sym_db.RegisterMessage(QueryModelResponse)
PublishRequest = _reflection.GeneratedProtocolMessageType('PublishRequest', (_message.Message,), {
'RoleEntry' : _reflection.GeneratedProtocolMessageType('RoleEntry', (_message.Message,), {
'DESCRIPTOR' : _PUBLISHREQUEST_ROLEENTRY,
'__module__' : 'model_service_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.mlmodel.manager.PublishRequest.RoleEntry)
})
,
'ModelEntry' : _reflection.GeneratedProtocolMessageType('ModelEntry', (_message.Message,), {
'DESCRIPTOR' : _PUBLISHREQUEST_MODELENTRY,
'__module__' : 'model_service_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.mlmodel.manager.PublishRequest.ModelEntry)
})
,
'DESCRIPTOR' : _PUBLISHREQUEST,
'__module__' : 'model_service_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.mlmodel.manager.PublishRequest)
})
_sym_db.RegisterMessage(PublishRequest)
_sym_db.RegisterMessage(PublishRequest.RoleEntry)
_sym_db.RegisterMessage(PublishRequest.ModelEntry)
PublishResponse = _reflection.GeneratedProtocolMessageType('PublishResponse', (_message.Message,), {
'DESCRIPTOR' : _PUBLISHRESPONSE,
'__module__' : 'model_service_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.mlmodel.manager.PublishResponse)
})
_sym_db.RegisterMessage(PublishResponse)
_MODELSERVICE = DESCRIPTOR.services_by_name['ModelService']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'B\021ModelServiceProto'
_ROLEMODELINFO_ROLEMODELINFOENTRY._options = None
_ROLEMODELINFO_ROLEMODELINFOENTRY._serialized_options = b'8\001'
_PUBLISHREQUEST_ROLEENTRY._options = None
_PUBLISHREQUEST_ROLEENTRY._serialized_options = b'8\001'
_PUBLISHREQUEST_MODELENTRY._options = None
_PUBLISHREQUEST_MODELENTRY._serialized_options = b'8\001'
_PARTY._serialized_start=63
_PARTY._serialized_end=87
_LOCALINFO._serialized_start=89
_LOCALINFO._serialized_end=131
_MODELINFO._serialized_start=133
_MODELINFO._serialized_end=182
_ROLEMODELINFO._serialized_start=185
_ROLEMODELINFO._serialized_end=402
_ROLEMODELINFO_ROLEMODELINFOENTRY._serialized_start=299
_ROLEMODELINFO_ROLEMODELINFOENTRY._serialized_end=402
_UNLOADREQUEST._serialized_start=404
_UNLOADREQUEST._serialized_end=457
_UNLOADRESPONSE._serialized_start=459
_UNLOADRESPONSE._serialized_end=512
_UNBINDREQUEST._serialized_start=514
_UNBINDREQUEST._serialized_end=586
_UNBINDRESPONSE._serialized_start=588
_UNBINDRESPONSE._serialized_end=641
_QUERYMODELREQUEST._serialized_start=644
_QUERYMODELREQUEST._serialized_end=777
_MODELBINDINFO._serialized_start=779
_MODELBINDINFO._serialized_end=794
_MODELINFOEX._serialized_start=796
_MODELINFOEX._serialized_end=898
_QUERYMODELRESPONSE._serialized_start=900
_QUERYMODELRESPONSE._serialized_end=1027
_PUBLISHREQUEST._serialized_start=1030
_PUBLISHREQUEST._serialized_end=1560
_PUBLISHREQUEST_ROLEENTRY._serialized_start=1369
_PUBLISHREQUEST_ROLEENTRY._serialized_end=1459
_PUBLISHREQUEST_MODELENTRY._serialized_start=1461
_PUBLISHREQUEST_MODELENTRY._serialized_end=1560
_PUBLISHRESPONSE._serialized_start=1562
_PUBLISHRESPONSE._serialized_end=1645
_MODELSERVICE._serialized_start=1648
_MODELSERVICE._serialized_end=2425
# @@protoc_insertion_point(module_scope)
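# --- Editor's usage sketch (not part of the protoc output above) ---------------------
# The classes registered above behave like ordinary protobuf messages; the map fields
# PublishRequest.role and PublishRequest.model are filled through their auto-created
# entries. The serviceId / table / namespace values below are hypothetical.
if __name__ == "__main__":
    req = PublishRequest(serviceId="svc-demo", tableName="model_table", namespace="model_ns")
    req.role["guest"].partyId.append("9999")                            # map<string, Party>
    req.model["guest"].roleModelInfo["9999"].tableName = "model_table"  # nested map entry
    print(req.SerializeToString())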
| 12,834 | 60.706731 | 3,753 | py |
FATE | FATE-master/python/fate_arch/protobuf/python/fate_data_structure_pb2.py |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: fate-data-structure.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19\x66\x61te-data-structure.proto\x12\x1b\x63om.webank.ai.fate.api.core\"&\n\x08RawEntry\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12\r\n\x05value\x18\x02 \x01(\x0c\"@\n\x06RawMap\x12\x36\n\x07\x65ntries\x18\x01 \x03(\x0b\x32%.com.webank.ai.fate.api.core.RawEntry\"n\n\x04\x44ict\x12\x39\n\x04\x64ict\x18\x01 \x03(\x0b\x32+.com.webank.ai.fate.api.core.Dict.DictEntry\x1a+\n\tDictEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c:\x02\x38\x01\x42\x0f\x42\rDataStructureb\x06proto3')
_RAWENTRY = DESCRIPTOR.message_types_by_name['RawEntry']
_RAWMAP = DESCRIPTOR.message_types_by_name['RawMap']
_DICT = DESCRIPTOR.message_types_by_name['Dict']
_DICT_DICTENTRY = _DICT.nested_types_by_name['DictEntry']
RawEntry = _reflection.GeneratedProtocolMessageType('RawEntry', (_message.Message,), {
'DESCRIPTOR' : _RAWENTRY,
'__module__' : 'fate_data_structure_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.core.RawEntry)
})
_sym_db.RegisterMessage(RawEntry)
RawMap = _reflection.GeneratedProtocolMessageType('RawMap', (_message.Message,), {
'DESCRIPTOR' : _RAWMAP,
'__module__' : 'fate_data_structure_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.core.RawMap)
})
_sym_db.RegisterMessage(RawMap)
Dict = _reflection.GeneratedProtocolMessageType('Dict', (_message.Message,), {
'DictEntry' : _reflection.GeneratedProtocolMessageType('DictEntry', (_message.Message,), {
'DESCRIPTOR' : _DICT_DICTENTRY,
'__module__' : 'fate_data_structure_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.core.Dict.DictEntry)
})
,
'DESCRIPTOR' : _DICT,
'__module__' : 'fate_data_structure_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.core.Dict)
})
_sym_db.RegisterMessage(Dict)
_sym_db.RegisterMessage(Dict.DictEntry)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'B\rDataStructure'
_DICT_DICTENTRY._options = None
_DICT_DICTENTRY._serialized_options = b'8\001'
_RAWENTRY._serialized_start=58
_RAWENTRY._serialized_end=96
_RAWMAP._serialized_start=98
_RAWMAP._serialized_end=162
_DICT._serialized_start=164
_DICT._serialized_end=274
_DICT_DICTENTRY._serialized_start=231
_DICT_DICTENTRY._serialized_end=274
# @@protoc_insertion_point(module_scope)
| 2,931 | 41.492754 | 563 | py |
FATE | FATE-master/python/fate_arch/protobuf/python/model_service_pb2_grpc.py |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import model_service_pb2 as model__service__pb2
class ModelServiceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.publishLoad = channel.unary_unary(
'/com.webank.ai.fate.api.mlmodel.manager.ModelService/publishLoad',
request_serializer=model__service__pb2.PublishRequest.SerializeToString,
response_deserializer=model__service__pb2.PublishResponse.FromString,
)
self.publishBind = channel.unary_unary(
'/com.webank.ai.fate.api.mlmodel.manager.ModelService/publishBind',
request_serializer=model__service__pb2.PublishRequest.SerializeToString,
response_deserializer=model__service__pb2.PublishResponse.FromString,
)
self.publishOnline = channel.unary_unary(
'/com.webank.ai.fate.api.mlmodel.manager.ModelService/publishOnline',
request_serializer=model__service__pb2.PublishRequest.SerializeToString,
response_deserializer=model__service__pb2.PublishResponse.FromString,
)
self.queryModel = channel.unary_unary(
'/com.webank.ai.fate.api.mlmodel.manager.ModelService/queryModel',
request_serializer=model__service__pb2.QueryModelRequest.SerializeToString,
response_deserializer=model__service__pb2.QueryModelResponse.FromString,
)
self.unload = channel.unary_unary(
'/com.webank.ai.fate.api.mlmodel.manager.ModelService/unload',
request_serializer=model__service__pb2.UnloadRequest.SerializeToString,
response_deserializer=model__service__pb2.UnloadResponse.FromString,
)
self.unbind = channel.unary_unary(
'/com.webank.ai.fate.api.mlmodel.manager.ModelService/unbind',
request_serializer=model__service__pb2.UnbindRequest.SerializeToString,
response_deserializer=model__service__pb2.UnbindResponse.FromString,
)
class ModelServiceServicer(object):
"""Missing associated documentation comment in .proto file."""
def publishLoad(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def publishBind(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def publishOnline(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def queryModel(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def unload(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def unbind(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ModelServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'publishLoad': grpc.unary_unary_rpc_method_handler(
servicer.publishLoad,
request_deserializer=model__service__pb2.PublishRequest.FromString,
response_serializer=model__service__pb2.PublishResponse.SerializeToString,
),
'publishBind': grpc.unary_unary_rpc_method_handler(
servicer.publishBind,
request_deserializer=model__service__pb2.PublishRequest.FromString,
response_serializer=model__service__pb2.PublishResponse.SerializeToString,
),
'publishOnline': grpc.unary_unary_rpc_method_handler(
servicer.publishOnline,
request_deserializer=model__service__pb2.PublishRequest.FromString,
response_serializer=model__service__pb2.PublishResponse.SerializeToString,
),
'queryModel': grpc.unary_unary_rpc_method_handler(
servicer.queryModel,
request_deserializer=model__service__pb2.QueryModelRequest.FromString,
response_serializer=model__service__pb2.QueryModelResponse.SerializeToString,
),
'unload': grpc.unary_unary_rpc_method_handler(
servicer.unload,
request_deserializer=model__service__pb2.UnloadRequest.FromString,
response_serializer=model__service__pb2.UnloadResponse.SerializeToString,
),
'unbind': grpc.unary_unary_rpc_method_handler(
servicer.unbind,
request_deserializer=model__service__pb2.UnbindRequest.FromString,
response_serializer=model__service__pb2.UnbindResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'com.webank.ai.fate.api.mlmodel.manager.ModelService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ModelService(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def publishLoad(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/com.webank.ai.fate.api.mlmodel.manager.ModelService/publishLoad',
model__service__pb2.PublishRequest.SerializeToString,
model__service__pb2.PublishResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def publishBind(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/com.webank.ai.fate.api.mlmodel.manager.ModelService/publishBind',
model__service__pb2.PublishRequest.SerializeToString,
model__service__pb2.PublishResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def publishOnline(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/com.webank.ai.fate.api.mlmodel.manager.ModelService/publishOnline',
model__service__pb2.PublishRequest.SerializeToString,
model__service__pb2.PublishResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def queryModel(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/com.webank.ai.fate.api.mlmodel.manager.ModelService/queryModel',
model__service__pb2.QueryModelRequest.SerializeToString,
model__service__pb2.QueryModelResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def unload(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/com.webank.ai.fate.api.mlmodel.manager.ModelService/unload',
model__service__pb2.UnloadRequest.SerializeToString,
model__service__pb2.UnloadResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def unbind(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/com.webank.ai.fate.api.mlmodel.manager.ModelService/unbind',
model__service__pb2.UnbindRequest.SerializeToString,
model__service__pb2.UnbindResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
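# --- Editor's client sketch (not part of the generated stubs above) ------------------
# A minimal illustration of how ModelServiceStub is normally used; the address and the
# request field values are hypothetical.
if __name__ == "__main__":
    with grpc.insecure_channel("localhost:8000") as channel:
        stub = ModelServiceStub(channel)
        request = model__service__pb2.UnloadRequest(tableName="demo_table", namespace="demo_ns")
        response = stub.unload(request)
        print(response.statusCode, response.message)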
| 10,603 | 44.706897 | 131 | py |
FATE | FATE-master/python/fate_arch/protobuf/python/proxy_pb2.py |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proxy.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import basic_meta_pb2 as basic__meta__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0bproxy.proto\x12*com.webank.ai.eggroll.api.networking.proxy\x1a\x10\x62\x61sic-meta.proto\"&\n\x05Model\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x64\x61taKey\x18\x02 \x01(\t\"X\n\x04Task\x12\x0e\n\x06taskId\x18\x01 \x01(\t\x12@\n\x05model\x18\x02 \x01(\x0b\x32\x31.com.webank.ai.eggroll.api.networking.proxy.Model\"p\n\x05Topic\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07partyId\x18\x02 \x01(\t\x12\x0c\n\x04role\x18\x03 \x01(\t\x12:\n\x08\x63\x61llback\x18\x04 \x01(\x0b\x32(.com.webank.ai.eggroll.api.core.Endpoint\"\x17\n\x07\x43ommand\x12\x0c\n\x04name\x18\x01 \x01(\t\"p\n\x04\x43onf\x12\x16\n\x0eoverallTimeout\x18\x01 \x01(\x03\x12\x1d\n\x15\x63ompletionWaitTimeout\x18\x02 \x01(\x03\x12\x1d\n\x15packetIntervalTimeout\x18\x03 \x01(\x03\x12\x12\n\nmaxRetries\x18\x04 \x01(\x05\"\x9a\x03\n\x08Metadata\x12>\n\x04task\x18\x01 \x01(\x0b\x32\x30.com.webank.ai.eggroll.api.networking.proxy.Task\x12>\n\x03src\x18\x02 \x01(\x0b\x32\x31.com.webank.ai.eggroll.api.networking.proxy.Topic\x12>\n\x03\x64st\x18\x03 \x01(\x0b\x32\x31.com.webank.ai.eggroll.api.networking.proxy.Topic\x12\x44\n\x07\x63ommand\x18\x04 \x01(\x0b\x32\x33.com.webank.ai.eggroll.api.networking.proxy.Command\x12\x10\n\x08operator\x18\x05 \x01(\t\x12\x0b\n\x03seq\x18\x06 \x01(\x03\x12\x0b\n\x03\x61\x63k\x18\x07 \x01(\x03\x12>\n\x04\x63onf\x18\x08 \x01(\x0b\x32\x30.com.webank.ai.eggroll.api.networking.proxy.Conf\x12\x0b\n\x03\x65xt\x18\t \x01(\x0c\x12\x0f\n\x07version\x18\x64 \x01(\t\"\"\n\x04\x44\x61ta\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\"\x8e\x01\n\x06Packet\x12\x44\n\x06header\x18\x01 \x01(\x0b\x32\x34.com.webank.ai.eggroll.api.networking.proxy.Metadata\x12>\n\x04\x62ody\x18\x02 \x01(\x0b\x32\x30.com.webank.ai.eggroll.api.networking.proxy.Data\"\xa3\x01\n\x11HeartbeatResponse\x12\x44\n\x06header\x18\x01 \x01(\x0b\x32\x34.com.webank.ai.eggroll.api.networking.proxy.Metadata\x12H\n\toperation\x18\x02 \x01(\x0e\x32\x35.com.webank.ai.eggroll.api.networking.proxy.Operation\"\xc5\x01\n\x0cPollingFrame\x12\x0e\n\x06method\x18\x01 \x01(\t\x12\x0b\n\x03seq\x18\x02 \x01(\x03\x12\x46\n\x08metadata\x18\n \x01(\x0b\x32\x34.com.webank.ai.eggroll.api.networking.proxy.Metadata\x12\x42\n\x06packet\x18\x14 \x01(\x0b\x32\x32.com.webank.ai.eggroll.api.networking.proxy.Packet\x12\x0c\n\x04\x64\x65sc\x18\x1e \x01(\t*O\n\tOperation\x12\t\n\x05START\x10\x00\x12\x07\n\x03RUN\x10\x01\x12\x08\n\x04STOP\x10\x02\x12\x08\n\x04KILL\x10\x03\x12\x0c\n\x08GET_DATA\x10\x04\x12\x0c\n\x08PUT_DATA\x10\x05\x32\xf6\x03\n\x13\x44\x61taTransferService\x12r\n\x04push\x12\x32.com.webank.ai.eggroll.api.networking.proxy.Packet\x1a\x34.com.webank.ai.eggroll.api.networking.proxy.Metadata(\x01\x12r\n\x04pull\x12\x34.com.webank.ai.eggroll.api.networking.proxy.Metadata\x1a\x32.com.webank.ai.eggroll.api.networking.proxy.Packet0\x01\x12s\n\tunaryCall\x12\x32.com.webank.ai.eggroll.api.networking.proxy.Packet\x1a\x32.com.webank.ai.eggroll.api.networking.proxy.Packet\x12\x81\x01\n\x07polling\x12\x38.com.webank.ai.eggroll.api.networking.proxy.PollingFrame\x1a\x38.com.webank.ai.eggroll.api.networking.proxy.PollingFrame(\x01\x30\x01\x32t\n\x0cRouteService\x12\x64\n\x05query\x12\x31.com.webank.ai.eggroll.api.networking.proxy.Topic\x1a(.com.webank.ai.eggroll.api.core.Endpointb\x06proto3')
_OPERATION = DESCRIPTOR.enum_types_by_name['Operation']
Operation = enum_type_wrapper.EnumTypeWrapper(_OPERATION)
START = 0
RUN = 1
STOP = 2
KILL = 3
GET_DATA = 4
PUT_DATA = 5
_MODEL = DESCRIPTOR.message_types_by_name['Model']
_TASK = DESCRIPTOR.message_types_by_name['Task']
_TOPIC = DESCRIPTOR.message_types_by_name['Topic']
_COMMAND = DESCRIPTOR.message_types_by_name['Command']
_CONF = DESCRIPTOR.message_types_by_name['Conf']
_METADATA = DESCRIPTOR.message_types_by_name['Metadata']
_DATA = DESCRIPTOR.message_types_by_name['Data']
_PACKET = DESCRIPTOR.message_types_by_name['Packet']
_HEARTBEATRESPONSE = DESCRIPTOR.message_types_by_name['HeartbeatResponse']
_POLLINGFRAME = DESCRIPTOR.message_types_by_name['PollingFrame']
Model = _reflection.GeneratedProtocolMessageType('Model', (_message.Message,), {
'DESCRIPTOR' : _MODEL,
'__module__' : 'proxy_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.networking.proxy.Model)
})
_sym_db.RegisterMessage(Model)
Task = _reflection.GeneratedProtocolMessageType('Task', (_message.Message,), {
'DESCRIPTOR' : _TASK,
'__module__' : 'proxy_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.networking.proxy.Task)
})
_sym_db.RegisterMessage(Task)
Topic = _reflection.GeneratedProtocolMessageType('Topic', (_message.Message,), {
'DESCRIPTOR' : _TOPIC,
'__module__' : 'proxy_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.networking.proxy.Topic)
})
_sym_db.RegisterMessage(Topic)
Command = _reflection.GeneratedProtocolMessageType('Command', (_message.Message,), {
'DESCRIPTOR' : _COMMAND,
'__module__' : 'proxy_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.networking.proxy.Command)
})
_sym_db.RegisterMessage(Command)
Conf = _reflection.GeneratedProtocolMessageType('Conf', (_message.Message,), {
'DESCRIPTOR' : _CONF,
'__module__' : 'proxy_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.networking.proxy.Conf)
})
_sym_db.RegisterMessage(Conf)
Metadata = _reflection.GeneratedProtocolMessageType('Metadata', (_message.Message,), {
'DESCRIPTOR' : _METADATA,
'__module__' : 'proxy_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.networking.proxy.Metadata)
})
_sym_db.RegisterMessage(Metadata)
Data = _reflection.GeneratedProtocolMessageType('Data', (_message.Message,), {
'DESCRIPTOR' : _DATA,
'__module__' : 'proxy_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.networking.proxy.Data)
})
_sym_db.RegisterMessage(Data)
Packet = _reflection.GeneratedProtocolMessageType('Packet', (_message.Message,), {
'DESCRIPTOR' : _PACKET,
'__module__' : 'proxy_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.networking.proxy.Packet)
})
_sym_db.RegisterMessage(Packet)
HeartbeatResponse = _reflection.GeneratedProtocolMessageType('HeartbeatResponse', (_message.Message,), {
'DESCRIPTOR' : _HEARTBEATRESPONSE,
'__module__' : 'proxy_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.networking.proxy.HeartbeatResponse)
})
_sym_db.RegisterMessage(HeartbeatResponse)
PollingFrame = _reflection.GeneratedProtocolMessageType('PollingFrame', (_message.Message,), {
'DESCRIPTOR' : _POLLINGFRAME,
'__module__' : 'proxy_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.networking.proxy.PollingFrame)
})
_sym_db.RegisterMessage(PollingFrame)
_DATATRANSFERSERVICE = DESCRIPTOR.services_by_name['DataTransferService']
_ROUTESERVICE = DESCRIPTOR.services_by_name['RouteService']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_OPERATION._serialized_start=1420
_OPERATION._serialized_end=1499
_MODEL._serialized_start=77
_MODEL._serialized_end=115
_TASK._serialized_start=117
_TASK._serialized_end=205
_TOPIC._serialized_start=207
_TOPIC._serialized_end=319
_COMMAND._serialized_start=321
_COMMAND._serialized_end=344
_CONF._serialized_start=346
_CONF._serialized_end=458
_METADATA._serialized_start=461
_METADATA._serialized_end=871
_DATA._serialized_start=873
_DATA._serialized_end=907
_PACKET._serialized_start=910
_PACKET._serialized_end=1052
_HEARTBEATRESPONSE._serialized_start=1055
_HEARTBEATRESPONSE._serialized_end=1218
_POLLINGFRAME._serialized_start=1221
_POLLINGFRAME._serialized_end=1418
_DATATRANSFERSERVICE._serialized_start=1502
_DATATRANSFERSERVICE._serialized_end=2004
_ROUTESERVICE._serialized_start=2006
_ROUTESERVICE._serialized_end=2122
# @@protoc_insertion_point(module_scope)
| 8,556 | 58.839161 | 3,347 | py |
FATE | FATE-master/python/fate_arch/protobuf/python/__init__.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
| 702 | 32.47619 | 75 | py |
FATE | FATE-master/python/fate_arch/protobuf/python/proxy_pb2_grpc.py |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import basic_meta_pb2 as basic__meta__pb2
import proxy_pb2 as proxy__pb2
class DataTransferServiceStub(object):
"""data transfer service
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.push = channel.stream_unary(
'/com.webank.ai.eggroll.api.networking.proxy.DataTransferService/push',
request_serializer=proxy__pb2.Packet.SerializeToString,
response_deserializer=proxy__pb2.Metadata.FromString,
)
self.pull = channel.unary_stream(
'/com.webank.ai.eggroll.api.networking.proxy.DataTransferService/pull',
request_serializer=proxy__pb2.Metadata.SerializeToString,
response_deserializer=proxy__pb2.Packet.FromString,
)
self.unaryCall = channel.unary_unary(
'/com.webank.ai.eggroll.api.networking.proxy.DataTransferService/unaryCall',
request_serializer=proxy__pb2.Packet.SerializeToString,
response_deserializer=proxy__pb2.Packet.FromString,
)
self.polling = channel.stream_stream(
'/com.webank.ai.eggroll.api.networking.proxy.DataTransferService/polling',
request_serializer=proxy__pb2.PollingFrame.SerializeToString,
response_deserializer=proxy__pb2.PollingFrame.FromString,
)
class DataTransferServiceServicer(object):
"""data transfer service
"""
def push(self, request_iterator, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def pull(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def unaryCall(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def polling(self, request_iterator, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_DataTransferServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'push': grpc.stream_unary_rpc_method_handler(
servicer.push,
request_deserializer=proxy__pb2.Packet.FromString,
response_serializer=proxy__pb2.Metadata.SerializeToString,
),
'pull': grpc.unary_stream_rpc_method_handler(
servicer.pull,
request_deserializer=proxy__pb2.Metadata.FromString,
response_serializer=proxy__pb2.Packet.SerializeToString,
),
'unaryCall': grpc.unary_unary_rpc_method_handler(
servicer.unaryCall,
request_deserializer=proxy__pb2.Packet.FromString,
response_serializer=proxy__pb2.Packet.SerializeToString,
),
'polling': grpc.stream_stream_rpc_method_handler(
servicer.polling,
request_deserializer=proxy__pb2.PollingFrame.FromString,
response_serializer=proxy__pb2.PollingFrame.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'com.webank.ai.eggroll.api.networking.proxy.DataTransferService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class DataTransferService(object):
"""data transfer service
"""
@staticmethod
def push(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_unary(request_iterator, target, '/com.webank.ai.eggroll.api.networking.proxy.DataTransferService/push',
proxy__pb2.Packet.SerializeToString,
proxy__pb2.Metadata.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def pull(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/com.webank.ai.eggroll.api.networking.proxy.DataTransferService/pull',
proxy__pb2.Metadata.SerializeToString,
proxy__pb2.Packet.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def unaryCall(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/com.webank.ai.eggroll.api.networking.proxy.DataTransferService/unaryCall',
proxy__pb2.Packet.SerializeToString,
proxy__pb2.Packet.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def polling(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_stream(request_iterator, target, '/com.webank.ai.eggroll.api.networking.proxy.DataTransferService/polling',
proxy__pb2.PollingFrame.SerializeToString,
proxy__pb2.PollingFrame.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
class RouteServiceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.query = channel.unary_unary(
'/com.webank.ai.eggroll.api.networking.proxy.RouteService/query',
request_serializer=proxy__pb2.Topic.SerializeToString,
response_deserializer=basic__meta__pb2.Endpoint.FromString,
)
class RouteServiceServicer(object):
"""Missing associated documentation comment in .proto file."""
def query(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RouteServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'query': grpc.unary_unary_rpc_method_handler(
servicer.query,
request_deserializer=proxy__pb2.Topic.FromString,
response_serializer=basic__meta__pb2.Endpoint.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'com.webank.ai.eggroll.api.networking.proxy.RouteService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class RouteService(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def query(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/com.webank.ai.eggroll.api.networking.proxy.RouteService/query',
proxy__pb2.Topic.SerializeToString,
basic__meta__pb2.Endpoint.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
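# --- Editor's client sketch (not part of the generated stubs above) ------------------
# ``push`` is a client-streaming RPC: it consumes an iterator of Packet messages and
# returns a single Metadata. The target address and packet contents are hypothetical.
if __name__ == "__main__":
    def _demo_packets():
        for i in range(3):
            yield proxy__pb2.Packet(
                header=proxy__pb2.Metadata(seq=i),
                body=proxy__pb2.Data(key=str(i), value=b"demo"),
            )
    with grpc.insecure_channel("localhost:9370") as channel:
        stub = DataTransferServiceStub(channel)
        ack_metadata = stub.push(_demo_packets())
        print(ack_metadata.seq, ack_metadata.ack)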
| 9,317 | 39.337662 | 147 | py |
FATE | FATE-master/python/fate_arch/protobuf/python/basic_meta_pb2.py |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: basic-meta.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10\x62\x61sic-meta.proto\x12\x1e\x63om.webank.ai.eggroll.api.core\"6\n\x08\x45ndpoint\x12\n\n\x02ip\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x05\x12\x10\n\x08hostname\x18\x03 \x01(\t\"H\n\tEndpoints\x12;\n\tendpoints\x18\x01 \x03(\x0b\x32(.com.webank.ai.eggroll.api.core.Endpoint\"H\n\x04\x44\x61ta\x12\x0e\n\x06isNull\x18\x01 \x01(\x08\x12\x14\n\x0chostLanguage\x18\x02 \x01(\t\x12\x0c\n\x04type\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\"F\n\x0cRepeatedData\x12\x36\n\x08\x64\x61talist\x18\x01 \x03(\x0b\x32$.com.webank.ai.eggroll.api.core.Data\"u\n\x0b\x43\x61llRequest\x12\x0f\n\x07isAsync\x18\x01 \x01(\x08\x12\x0f\n\x07timeout\x18\x02 \x01(\x03\x12\x0f\n\x07\x63ommand\x18\x03 \x01(\t\x12\x33\n\x05param\x18\x04 \x01(\x0b\x32$.com.webank.ai.eggroll.api.core.Data\"\x88\x01\n\x0c\x43\x61llResponse\x12\x42\n\x0creturnStatus\x18\x01 \x01(\x0b\x32,.com.webank.ai.eggroll.api.core.ReturnStatus\x12\x34\n\x06result\x18\x02 \x01(\x0b\x32$.com.webank.ai.eggroll.api.core.Data\"\"\n\x03Job\x12\r\n\x05jobId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"Y\n\x04Task\x12\x30\n\x03job\x18\x01 \x01(\x0b\x32#.com.webank.ai.eggroll.api.core.Job\x12\x0e\n\x06taskId\x18\x02 \x01(\x03\x12\x0f\n\x07tableId\x18\x03 \x01(\x03\"N\n\x06Result\x12\x32\n\x04task\x18\x01 \x01(\x0b\x32$.com.webank.ai.eggroll.api.core.Task\x12\x10\n\x08resultId\x18\x02 \x01(\x03\"-\n\x0cReturnStatus\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\"\xe2\x01\n\x0bSessionInfo\x12\x11\n\tsessionId\x18\x01 \x01(\t\x12\x61\n\x13\x63omputingEngineConf\x18\x02 \x03(\x0b\x32\x44.com.webank.ai.eggroll.api.core.SessionInfo.ComputingEngineConfEntry\x12\x14\n\x0cnamingPolicy\x18\x03 \x01(\t\x12\x0b\n\x03tag\x18\x04 \x01(\t\x1a:\n\x18\x43omputingEngineConfEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x62\x06proto3')
_ENDPOINT = DESCRIPTOR.message_types_by_name['Endpoint']
_ENDPOINTS = DESCRIPTOR.message_types_by_name['Endpoints']
_DATA = DESCRIPTOR.message_types_by_name['Data']
_REPEATEDDATA = DESCRIPTOR.message_types_by_name['RepeatedData']
_CALLREQUEST = DESCRIPTOR.message_types_by_name['CallRequest']
_CALLRESPONSE = DESCRIPTOR.message_types_by_name['CallResponse']
_JOB = DESCRIPTOR.message_types_by_name['Job']
_TASK = DESCRIPTOR.message_types_by_name['Task']
_RESULT = DESCRIPTOR.message_types_by_name['Result']
_RETURNSTATUS = DESCRIPTOR.message_types_by_name['ReturnStatus']
_SESSIONINFO = DESCRIPTOR.message_types_by_name['SessionInfo']
_SESSIONINFO_COMPUTINGENGINECONFENTRY = _SESSIONINFO.nested_types_by_name['ComputingEngineConfEntry']
Endpoint = _reflection.GeneratedProtocolMessageType('Endpoint', (_message.Message,), {
'DESCRIPTOR' : _ENDPOINT,
'__module__' : 'basic_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.core.Endpoint)
})
_sym_db.RegisterMessage(Endpoint)
Endpoints = _reflection.GeneratedProtocolMessageType('Endpoints', (_message.Message,), {
'DESCRIPTOR' : _ENDPOINTS,
'__module__' : 'basic_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.core.Endpoints)
})
_sym_db.RegisterMessage(Endpoints)
Data = _reflection.GeneratedProtocolMessageType('Data', (_message.Message,), {
'DESCRIPTOR' : _DATA,
'__module__' : 'basic_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.core.Data)
})
_sym_db.RegisterMessage(Data)
RepeatedData = _reflection.GeneratedProtocolMessageType('RepeatedData', (_message.Message,), {
'DESCRIPTOR' : _REPEATEDDATA,
'__module__' : 'basic_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.core.RepeatedData)
})
_sym_db.RegisterMessage(RepeatedData)
CallRequest = _reflection.GeneratedProtocolMessageType('CallRequest', (_message.Message,), {
'DESCRIPTOR' : _CALLREQUEST,
'__module__' : 'basic_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.core.CallRequest)
})
_sym_db.RegisterMessage(CallRequest)
CallResponse = _reflection.GeneratedProtocolMessageType('CallResponse', (_message.Message,), {
'DESCRIPTOR' : _CALLRESPONSE,
'__module__' : 'basic_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.core.CallResponse)
})
_sym_db.RegisterMessage(CallResponse)
Job = _reflection.GeneratedProtocolMessageType('Job', (_message.Message,), {
'DESCRIPTOR' : _JOB,
'__module__' : 'basic_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.core.Job)
})
_sym_db.RegisterMessage(Job)
Task = _reflection.GeneratedProtocolMessageType('Task', (_message.Message,), {
'DESCRIPTOR' : _TASK,
'__module__' : 'basic_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.core.Task)
})
_sym_db.RegisterMessage(Task)
Result = _reflection.GeneratedProtocolMessageType('Result', (_message.Message,), {
'DESCRIPTOR' : _RESULT,
'__module__' : 'basic_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.core.Result)
})
_sym_db.RegisterMessage(Result)
ReturnStatus = _reflection.GeneratedProtocolMessageType('ReturnStatus', (_message.Message,), {
'DESCRIPTOR' : _RETURNSTATUS,
'__module__' : 'basic_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.core.ReturnStatus)
})
_sym_db.RegisterMessage(ReturnStatus)
SessionInfo = _reflection.GeneratedProtocolMessageType('SessionInfo', (_message.Message,), {
'ComputingEngineConfEntry' : _reflection.GeneratedProtocolMessageType('ComputingEngineConfEntry', (_message.Message,), {
'DESCRIPTOR' : _SESSIONINFO_COMPUTINGENGINECONFENTRY,
'__module__' : 'basic_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.core.SessionInfo.ComputingEngineConfEntry)
})
,
'DESCRIPTOR' : _SESSIONINFO,
'__module__' : 'basic_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.eggroll.api.core.SessionInfo)
})
_sym_db.RegisterMessage(SessionInfo)
_sym_db.RegisterMessage(SessionInfo.ComputingEngineConfEntry)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_SESSIONINFO_COMPUTINGENGINECONFENTRY._options = None
_SESSIONINFO_COMPUTINGENGINECONFENTRY._serialized_options = b'8\001'
_ENDPOINT._serialized_start=52
_ENDPOINT._serialized_end=106
_ENDPOINTS._serialized_start=108
_ENDPOINTS._serialized_end=180
_DATA._serialized_start=182
_DATA._serialized_end=254
_REPEATEDDATA._serialized_start=256
_REPEATEDDATA._serialized_end=326
_CALLREQUEST._serialized_start=328
_CALLREQUEST._serialized_end=445
_CALLRESPONSE._serialized_start=448
_CALLRESPONSE._serialized_end=584
_JOB._serialized_start=586
_JOB._serialized_end=620
_TASK._serialized_start=622
_TASK._serialized_end=711
_RESULT._serialized_start=713
_RESULT._serialized_end=791
_RETURNSTATUS._serialized_start=793
_RETURNSTATUS._serialized_end=838
_SESSIONINFO._serialized_start=841
_SESSIONINFO._serialized_end=1067
_SESSIONINFO_COMPUTINGENGINECONFENTRY._serialized_start=1009
_SESSIONINFO_COMPUTINGENGINECONFENTRY._serialized_end=1067
# @@protoc_insertion_point(module_scope)
| 7,692 | 50.97973 | 1,937 | py |
FATE | FATE-master/python/fate_arch/federation/_nretry.py |
#
# Copyright 2022 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from fate_arch.common.log import getLogger
LOGGER = getLogger()
def nretry(func):
"""retry connection
"""
def wrapper(self, *args, **kwargs):
"""wrapper
"""
res = None
exception = None
for ntry in range(10):
try:
res = func(self, *args, **kwargs)
exception = None
break
except Exception as e:
LOGGER.error("function %s error" % func.__name__, exc_info=True)
exception = e
time.sleep(1)
if exception is not None:
LOGGER.debug("retries exhausted", exc_info=exception)
raise exception
return res
return wrapper
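# --- Editor's usage sketch (not part of the original module) -------------------------
# ``nretry`` wraps an instance method, retries it up to 10 times with a one second
# pause between attempts, and re-raises the last exception if every attempt fails.
# The class below is hypothetical and only demonstrates the decorator.
if __name__ == "__main__":
    class _FlakyClient:
        def __init__(self):
            self._attempts = 0

        @nretry
        def connect(self):
            self._attempts += 1
            if self._attempts < 3:
                raise ConnectionError("transient failure")
            return "connected"

    print(_FlakyClient().connect())  # succeeds on the third attempt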
| 1,384 | 25.634615 | 80 | py |
FATE | FATE-master/python/fate_arch/federation/_datastream.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import sys
import json
# Datastream is a wrapper around StringIO: it receives kv pairs and dumps them to a JSON array string
class Datastream(object):
def __init__(self):
self._string = io.StringIO()
self._string.write("[")
def get_size(self):
return sys.getsizeof(self._string.getvalue())
def get_data(self):
self._string.write("]")
return self._string.getvalue()
def append(self, kv: dict):
# add ',' if not the first element
if self._string.getvalue() != "[":
self._string.write(",")
json.dump(kv, self._string)
def clear(self):
self._string.close()
self.__init__()
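# --- Editor's usage sketch (not part of the original module) -------------------------
# Append key/value dicts, read the accumulated JSON array with get_data(), and reset
# the buffer with clear() before reuse.
if __name__ == "__main__":
    stream = Datastream()
    stream.append({"k1": "v1"})
    stream.append({"k2": "v2"})
    print(stream.get_data())  # '[{"k1": "v1"},{"k2": "v2"}]'
    print(stream.get_size())  # in-memory size of the buffered string (sys.getsizeof)
    stream.clear()            # closes the buffer and re-initialises it to "["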
| 1,299 | 27.888889 | 85 | py |
FATE | FATE-master/python/fate_arch/federation/_federation.py |
#
# Copyright 2022 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import sys
import typing
from pickle import dumps as p_dumps, loads as p_loads
from fate_arch.abc import CTableABC
from fate_arch.abc import FederationABC, GarbageCollectionABC
from fate_arch.common import Party
from fate_arch.common.log import getLogger
from fate_arch.federation import FederationDataType
from fate_arch.federation._datastream import Datastream
from fate_arch.session import computing_session
LOGGER = getLogger()
NAME_DTYPE_TAG = "<dtype>"
_SPLIT_ = "^"
def _get_splits(obj, max_message_size):
obj_bytes = p_dumps(obj, protocol=4)
byte_size = len(obj_bytes)
num_slice = (byte_size - 1) // max_message_size + 1
if num_slice <= 1:
return obj, num_slice
else:
_max_size = max_message_size
kv = [(i, obj_bytes[slice(i * _max_size, (i + 1) * _max_size)]) for i in range(num_slice)]
return kv, num_slice
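# --- Editor's sketch (not part of the original module) -------------------------------
# Round-trip behaviour of _get_splits: objects whose pickled size exceeds
# max_message_size come back as (index, chunk) pairs that can be re-joined in order;
# smaller objects are returned unchanged with num_slice == 1. Values are hypothetical.
if __name__ == "__main__":
    payload = {"weights": list(range(10000))}
    chunks, num_slice = _get_splits(payload, max_message_size=1024)
    if num_slice > 1:
        restored = p_loads(b"".join(chunk for _, chunk in sorted(chunks)))
        assert restored == payload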
class FederationBase(FederationABC):
@staticmethod
def from_conf(
federation_session_id: str,
party: Party,
runtime_conf: dict,
**kwargs
):
raise NotImplementedError()
def __init__(
self,
session_id,
party: Party,
mq,
max_message_size,
conf=None
):
self._session_id = session_id
self._party = party
self._mq = mq
self._topic_map = {}
self._channels_map = {}
self._name_dtype_map = {}
self._message_cache = {}
self._max_message_size = max_message_size
self._conf = conf
def __getstate__(self):
pass
@property
def session_id(self) -> str:
return self._session_id
def destroy(self, parties):
raise NotImplementedError()
def get(
self, name: str, tag: str, parties: typing.List[Party], gc: GarbageCollectionABC
) -> typing.List:
log_str = f"[federation.get](name={name}, tag={tag}, parties={parties})"
LOGGER.debug(f"[{log_str}]start to get")
_name_dtype_keys = [
_SPLIT_.join([party.role, party.party_id, name, tag, "get"])
for party in parties
]
if _name_dtype_keys[0] not in self._name_dtype_map:
party_topic_infos = self._get_party_topic_infos(parties, dtype=NAME_DTYPE_TAG)
channel_infos = self._get_channels(party_topic_infos=party_topic_infos)
rtn_dtype = []
for i, info in enumerate(channel_infos):
obj = self._receive_obj(
info, name, tag=_SPLIT_.join([tag, NAME_DTYPE_TAG])
)
rtn_dtype.append(obj)
LOGGER.debug(
f"[federation.get] _name_dtype_keys: {_name_dtype_keys}, dtype: {obj}"
)
for k in _name_dtype_keys:
if k not in self._name_dtype_map:
self._name_dtype_map[k] = rtn_dtype[0]
rtn_dtype = self._name_dtype_map[_name_dtype_keys[0]]
rtn = []
dtype = rtn_dtype.get("dtype", None)
partitions = rtn_dtype.get("partitions", None)
if dtype == FederationDataType.TABLE or dtype == FederationDataType.SPLIT_OBJECT:
party_topic_infos = self._get_party_topic_infos(parties, name, partitions=partitions)
for i in range(len(party_topic_infos)):
party = parties[i]
role = party.role
party_id = party.party_id
topic_infos = party_topic_infos[i]
receive_func = self._get_partition_receive_func(
name=name,
tag=tag,
src_party_id=self._party.party_id,
src_role=self._party.role,
dst_party_id=party_id,
dst_role=role,
topic_infos=topic_infos,
mq=self._mq,
conf=self._conf
)
table = computing_session.parallelize(range(partitions), partitions, include_key=False)
table = table.mapPartitionsWithIndex(receive_func)
# add gc
gc.add_gc_action(tag, table, "__del__", {})
LOGGER.debug(
f"[{log_str}]received table({i + 1}/{len(parties)}), party: {parties[i]} "
)
if dtype == FederationDataType.TABLE:
rtn.append(table)
else:
obj_bytes = b''.join(map(lambda t: t[1], sorted(table.collect(), key=lambda x: x[0])))
obj = p_loads(obj_bytes)
rtn.append(obj)
else:
party_topic_infos = self._get_party_topic_infos(parties, name)
channel_infos = self._get_channels(party_topic_infos=party_topic_infos)
for i, info in enumerate(channel_infos):
obj = self._receive_obj(info, name, tag)
LOGGER.debug(
f"[{log_str}]received obj({i + 1}/{len(parties)}), party: {parties[i]} "
)
rtn.append(obj)
LOGGER.debug(f"[{log_str}]finish to get")
return rtn
def remote(
self,
v,
name: str,
tag: str,
parties: typing.List[Party],
gc: GarbageCollectionABC,
) -> typing.NoReturn:
log_str = f"[federation.remote](name={name}, tag={tag}, parties={parties})"
_name_dtype_keys = [
_SPLIT_.join([party.role, party.party_id, name, tag, "remote"])
for party in parties
]
if _name_dtype_keys[0] not in self._name_dtype_map:
party_topic_infos = self._get_party_topic_infos(parties, dtype=NAME_DTYPE_TAG)
channel_infos = self._get_channels(party_topic_infos=party_topic_infos)
if not isinstance(v, CTableABC):
v, num_slice = _get_splits(v, self._max_message_size)
if num_slice > 1:
v = computing_session.parallelize(data=v, partition=1, include_key=True)
body = {"dtype": FederationDataType.SPLIT_OBJECT, "partitions": v.partitions}
else:
body = {"dtype": FederationDataType.OBJECT}
else:
body = {"dtype": FederationDataType.TABLE, "partitions": v.partitions}
LOGGER.debug(
f"[federation.remote] _name_dtype_keys: {_name_dtype_keys}, dtype: {body}"
)
self._send_obj(
name=name,
tag=_SPLIT_.join([tag, NAME_DTYPE_TAG]),
data=p_dumps(body),
channel_infos=channel_infos,
)
for k in _name_dtype_keys:
if k not in self._name_dtype_map:
self._name_dtype_map[k] = body
if isinstance(v, CTableABC):
total_size = v.count()
partitions = v.partitions
LOGGER.debug(
f"[{log_str}]start to remote table, total_size={total_size}, partitions={partitions}"
)
party_topic_infos = self._get_party_topic_infos(parties, name, partitions=partitions)
# add gc
gc.add_gc_action(tag, v, "__del__", {})
send_func = self._get_partition_send_func(
name=name,
tag=tag,
partitions=partitions,
party_topic_infos=party_topic_infos,
src_party_id=self._party.party_id,
src_role=self._party.role,
mq=self._mq,
max_message_size=self._max_message_size,
conf=self._conf
)
# noinspection PyProtectedMember
v.mapPartitionsWithIndex(send_func)
else:
LOGGER.debug(f"[{log_str}]start to remote obj")
party_topic_infos = self._get_party_topic_infos(parties, name)
channel_infos = self._get_channels(party_topic_infos=party_topic_infos)
self._send_obj(
name=name, tag=tag, data=p_dumps(v), channel_infos=channel_infos
)
LOGGER.debug(f"[{log_str}]finish to remote")
def _get_party_topic_infos(
self, parties: typing.List[Party], name=None, partitions=None, dtype=None
) -> typing.List:
topic_infos = [
self._get_or_create_topic(party, name, partitions, dtype)
for party in parties
]
return topic_infos
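    # transport-specific subclasses (e.g. the Pulsar and RabbitMQ federations) implement
    # topic creation and channel construction below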
def _maybe_create_topic_and_replication(self, party, topic_suffix):
# gen names
raise NotImplementedError()
def _get_or_create_topic(
self, party: Party, name=None, partitions=None, dtype=None
) -> typing.Tuple:
topic_key_list = []
topic_infos = []
if dtype is not None:
topic_key = _SPLIT_.join(
[party.role, party.party_id, dtype, dtype])
topic_key_list.append(topic_key)
else:
if partitions is not None:
for i in range(partitions):
topic_key = _SPLIT_.join(
[party.role, party.party_id, name, str(i)])
topic_key_list.append(topic_key)
elif name is not None:
topic_key = _SPLIT_.join([party.role, party.party_id, name])
topic_key_list.append(topic_key)
else:
topic_key = _SPLIT_.join([party.role, party.party_id])
topic_key_list.append(topic_key)
for topic_key in topic_key_list:
if topic_key not in self._topic_map:
topic_key_splits = topic_key.split(_SPLIT_)
topic_suffix = "-".join(topic_key_splits[2:])
topic_pair = self._maybe_create_topic_and_replication(party, topic_suffix)
self._topic_map[topic_key] = topic_pair
topic_pair = self._topic_map[topic_key]
topic_infos.append((topic_key, topic_pair))
return topic_infos
def _get_channel(
self, topic_pair, src_party_id, src_role, dst_party_id, dst_role, mq=None, conf: dict = None):
raise NotImplementedError()
def _get_channels(self, party_topic_infos):
channel_infos = []
for e in party_topic_infos:
for topic_key, topic_pair in e:
topic_key_splits = topic_key.split(_SPLIT_)
role = topic_key_splits[0]
party_id = topic_key_splits[1]
info = self._channels_map.get(topic_key)
if info is None:
info = self._get_channel(
topic_pair=topic_pair,
src_party_id=self._party.party_id,
src_role=self._party.role,
dst_party_id=party_id,
dst_role=role,
mq=self._mq,
conf=self._conf
)
self._channels_map[topic_key] = info
channel_infos.append(info)
return channel_infos
def _get_channels_index(self, index, party_topic_infos, src_party_id, src_role, mq=None, conf: dict = None):
channel_infos = []
for e in party_topic_infos:
# select specified topic_info for a party
topic_key, topic_pair = e[index]
topic_key_splits = topic_key.split(_SPLIT_)
role = topic_key_splits[0]
party_id = topic_key_splits[1]
info = self._get_channel(
topic_pair=topic_pair,
src_party_id=src_party_id,
src_role=src_role,
dst_party_id=party_id,
dst_role=role,
mq=mq,
conf=conf
)
channel_infos.append(info)
return channel_infos
def _send_obj(self, name, tag, data, channel_infos):
for info in channel_infos:
properties = {
"content_type": "text/plain",
"app_id": info._dst_party_id,
"message_id": name,
"correlation_id": tag
}
LOGGER.debug(f"[federation._send_obj]properties:{properties}.")
info.produce(body=data, properties=properties)
def _send_kv(
self, name, tag, data, channel_infos, partition_size, partitions, message_key
):
headers = json.dumps(
{
"partition_size": partition_size,
"partitions": partitions,
"message_key": message_key
}
)
for info in channel_infos:
properties = {
"content_type": "application/json",
"app_id": info._dst_party_id,
"message_id": name,
"correlation_id": tag,
"headers": headers
}
print(f"[federation._send_kv]info: {info}, properties: {properties}.")
info.produce(body=data, properties=properties)
def _get_partition_send_func(
self,
name,
tag,
partitions,
party_topic_infos,
src_party_id,
src_role,
mq,
max_message_size,
conf: dict,
):
def _fn(index, kvs):
return self._partition_send(
index=index,
kvs=kvs,
name=name,
tag=tag,
partitions=partitions,
party_topic_infos=party_topic_infos,
src_party_id=src_party_id,
src_role=src_role,
mq=mq,
max_message_size=max_message_size,
conf=conf,
)
return _fn
def _partition_send(
self,
index,
kvs,
name,
tag,
partitions,
party_topic_infos,
src_party_id,
src_role,
mq,
max_message_size,
conf: dict,
):
channel_infos = self._get_channels_index(
index=index, party_topic_infos=party_topic_infos, src_party_id=src_party_id, src_role=src_role, mq=mq,
conf=conf
)
datastream = Datastream()
base_message_key = str(index)
message_key_idx = 0
count = 0
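        # key/value pairs are pickled to hex and batched into `datastream`; the batch is
        # flushed whenever its estimated size would exceed max_message_size, and only the
        # final flush carries the real partition size so the receiver knows it is complete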
for k, v in kvs:
count += 1
el = {"k": p_dumps(k).hex(), "v": p_dumps(v).hex()}
            # roughly calculate the size of the packaged message to avoid a full serialization
if (
datastream.get_size() + sys.getsizeof(el["k"]) + sys.getsizeof(el["v"])
>= max_message_size
):
print(
f"[federation._partition_send]The size of message is: {datastream.get_size()}"
)
message_key_idx += 1
message_key = base_message_key + "_" + str(message_key_idx)
self._send_kv(
name=name,
tag=tag,
data=datastream.get_data().encode(),
channel_infos=channel_infos,
partition_size=-1,
partitions=partitions,
message_key=message_key,
)
datastream.clear()
datastream.append(el)
message_key_idx += 1
message_key = _SPLIT_.join([base_message_key, str(message_key_idx)])
self._send_kv(
name=name,
tag=tag,
data=datastream.get_data().encode(),
channel_infos=channel_infos,
partition_size=count,
partitions=partitions,
message_key=message_key,
)
return [(index, 1)]
def _get_message_cache_key(self, name, tag, party_id, role):
cache_key = _SPLIT_.join([name, tag, str(party_id), role])
return cache_key
def _get_consume_message(self, channel_info):
raise NotImplementedError()
def _consume_ack(self, channel_info, id):
raise NotImplementedError()
def _query_receive_topic(self, channel_info):
return channel_info
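    # objects that arrive for a different (name, tag) than the one currently awaited are
    # parked in _message_cache and returned directly by a later matching call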
def _receive_obj(self, channel_info, name, tag):
party_id = channel_info._dst_party_id
role = channel_info._dst_role
wish_cache_key = self._get_message_cache_key(name, tag, party_id, role)
if wish_cache_key in self._message_cache:
recv_obj = self._message_cache[wish_cache_key]
del self._message_cache[wish_cache_key]
return recv_obj
channel_info = self._query_receive_topic(channel_info)
for id, properties, body in self._get_consume_message(channel_info):
LOGGER.debug(
f"[federation._receive_obj] properties: {properties}"
)
if properties["message_id"] != name or properties["correlation_id"] != tag:
# todo: fix this
LOGGER.warning(
f"[federation._receive_obj] require {name}.{tag}, got {properties['message_id']}.{properties['correlation_id']}"
)
cache_key = self._get_message_cache_key(
properties["message_id"], properties["correlation_id"], party_id, role
)
# object
if properties["content_type"] == "text/plain":
recv_obj = p_loads(body)
self._consume_ack(channel_info, id)
LOGGER.debug(
f"[federation._receive_obj] cache_key: {cache_key}, wish_cache_key: {wish_cache_key}"
)
if cache_key == wish_cache_key:
channel_info.cancel()
return recv_obj
else:
self._message_cache[cache_key] = recv_obj
else:
raise ValueError(
f"[federation._receive_obj] properties.content_type is {properties['content_type']}, but must be text/plain"
)
def _get_partition_receive_func(
self, name, tag, src_party_id, src_role, dst_party_id, dst_role, topic_infos, mq, conf: dict
):
def _fn(index, kvs):
return self._partition_receive(
index=index,
kvs=kvs,
name=name,
tag=tag,
src_party_id=src_party_id,
src_role=src_role,
dst_party_id=dst_party_id,
dst_role=dst_role,
topic_infos=topic_infos,
mq=mq,
conf=conf,
)
return _fn
def _partition_receive(
self,
index,
kvs,
name,
tag,
src_party_id,
src_role,
dst_party_id,
dst_role,
topic_infos,
mq,
conf: dict,
):
topic_pair = topic_infos[index][1]
channel_info = self._get_channel(topic_pair=topic_pair,
src_party_id=src_party_id,
src_role=src_role,
dst_party_id=dst_party_id,
dst_role=dst_role,
mq=mq,
conf=conf)
message_key_cache = set()
count = 0
partition_size = -1
all_data = []
channel_info = self._query_receive_topic(channel_info)
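        # partition_size stays -1 until the final chunk (which carries the real count)
        # arrives; duplicated chunks are skipped via message_key_cache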
while True:
try:
for id, properties, body in self._get_consume_message(channel_info):
print(
f"[federation._partition_receive] properties: {properties}."
)
if properties["message_id"] != name or properties["correlation_id"] != tag:
# todo: fix this
self._consume_ack(channel_info, id)
print(
f"[federation._partition_receive]: require {name}.{tag}, got {properties['message_id']}.{properties['correlation_id']}"
)
continue
if properties["content_type"] == "application/json":
header = json.loads(properties["headers"])
message_key = header["message_key"]
if message_key in message_key_cache:
print(
f"[federation._partition_receive] message_key : {message_key} is duplicated"
)
self._consume_ack(channel_info, id)
continue
message_key_cache.add(message_key)
if header["partition_size"] >= 0:
partition_size = header["partition_size"]
data = json.loads(body.decode())
data_iter = (
(p_loads(bytes.fromhex(el["k"])), p_loads(bytes.fromhex(el["v"])))
for el in data
)
count += len(data)
print(f"[federation._partition_receive] count: {count}")
all_data.extend(data_iter)
self._consume_ack(channel_info, id)
if count == partition_size:
channel_info.cancel()
return all_data
else:
                        raise ValueError(
                            f"[federation._partition_receive]properties.content_type is {properties['content_type']}, but must be application/json"
                        )
except Exception as e:
LOGGER.error(
f"[federation._partition_receive]catch exception {e}, while receiving {name}.{tag}"
)
# avoid hang on consume()
if count == partition_size:
channel_info.cancel()
return all_data
else:
raise e
| 23,004 | 35.515873 | 147 |
py
|
FATE
|
FATE-master/python/fate_arch/federation/_gc.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
from collections import deque
from fate_arch.abc import GarbageCollectionABC
from fate_arch.common.log import getLogger
LOGGER = getLogger()
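# IterationGC delays destruction of federation intermediates: gc actions sharing a tag
# are grouped, and a group is only released once more than `capacity` newer groups have
# been queued, so objects from the last few iterations stay alive.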
class IterationGC(GarbageCollectionABC):
def __init__(self, capacity=2):
self._ashcan: deque[typing.List[typing.Tuple[typing.Any, str, dict]]] = deque()
self._last_tag: typing.Optional[str] = None
self._capacity = capacity
self._enable = True
def add_gc_action(self, tag: str, obj, method, args_dict):
if self._last_tag == tag:
self._ashcan[-1].append((obj, method, args_dict))
else:
self._ashcan.append([(obj, method, args_dict)])
self._last_tag = tag
def disable(self):
self._enable = False
def set_capacity(self, capacity):
self._capacity = capacity
def gc(self):
if not self._enable:
return
if len(self._ashcan) <= self._capacity:
return
self._safe_gc_call(self._ashcan.popleft())
def clean(self):
while self._ashcan:
self._safe_gc_call(self._ashcan.pop())
@staticmethod
def _safe_gc_call(actions: typing.List[typing.Tuple[typing.Any, str, dict]]):
for obj, method, args_dict in actions:
try:
LOGGER.debug(f"[CLEAN]deleting {obj}, {method}, {args_dict}")
getattr(obj, method)(**args_dict)
except Exception as e:
LOGGER.debug(f"[CLEAN]this could be ignore {e}")
| 2,127 | 31.738462 | 87 |
py
|
FATE
|
FATE-master/python/fate_arch/federation/transfer_variable.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hashlib
import typing
from typing import Union
from fate_arch.common import Party, profile
from fate_arch.common.log import getLogger
from fate_arch.federation._gc import IterationGC
__all__ = ["Variable", "BaseTransferVariables"]
LOGGER = getLogger()
class FederationTagNamespace(object):
__namespace = "default"
@classmethod
def set_namespace(cls, namespace):
cls.__namespace = namespace
@classmethod
def generate_tag(cls, *suffix):
tags = (cls.__namespace, *map(str, suffix))
return ".".join(tags)
class Variable(object):
"""
variable to distinguish federation by name
"""
__instances: typing.MutableMapping[str, "Variable"] = {}
@classmethod
def get_or_create(
cls, name, create_func: typing.Callable[[], "Variable"]
) -> "Variable":
if name not in cls.__instances:
value = create_func()
cls.__instances[name] = value
return cls.__instances[name]
def __init__(
self, name: str, src: typing.Tuple[str, ...], dst: typing.Tuple[str, ...]
):
if name in self.__instances:
raise RuntimeError(
f"{self.__instances[name]} with {name} already initialized, which expected to be an singleton object."
)
assert (
len(name.split(".")) >= 3
), "incorrect name format, should be `module_name.class_name.variable_name`"
self._name = name
self._src = src
self._dst = dst
self._get_gc = IterationGC()
self._remote_gc = IterationGC()
self._use_short_name = True
self._short_name = self._get_short_name(self._name)
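    # long dotted variable names are hashed (blake2b, 10-byte digest) into a compact
    # routing name of the form "hash.<digest>.<variable_name>"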
@staticmethod
def _get_short_name(name):
fix_sized = hashlib.blake2b(name.encode("utf-8"), digest_size=10).hexdigest()
_, right = name.rsplit(".", 1)
return f"hash.{fix_sized}.{right}"
    # copy never creates a new instance
def __copy__(self):
return self
    # deepcopy never creates a new instance
def __deepcopy__(self, memo):
return self
def set_preserve_num(self, n):
self._get_gc.set_capacity(n)
self._remote_gc.set_capacity(n)
return self
def disable_auto_clean(self):
self._get_gc.disable()
self._remote_gc.disable()
return self
def clean(self):
self._get_gc.clean()
self._remote_gc.clean()
def remote_parties(
self,
obj,
parties: Union[typing.List[Party], Party],
suffix: Union[typing.Any, typing.Tuple] = tuple(),
):
"""
remote object to specified parties
Parameters
----------
obj: object or table
object or table to remote
parties: typing.List[Party]
parties to remote object/table to
suffix: str or tuple of str
            suffix used to distinguish federation within variable
Returns
-------
None
"""
from fate_arch.session import get_session
session = get_session()
if isinstance(parties, Party):
parties = [parties]
if not isinstance(suffix, tuple):
suffix = (suffix,)
tag = FederationTagNamespace.generate_tag(*suffix)
for party in parties:
if party.role not in self._dst:
raise RuntimeError(
f"not allowed to remote object to {party} using {self._name}"
)
local = session.parties.local_party.role
if local not in self._src:
raise RuntimeError(
f"not allowed to remote object from {local} using {self._name}"
)
name = self._short_name if self._use_short_name else self._name
timer = profile.federation_remote_timer(name, self._name, tag, local, parties)
session.federation.remote(
v=obj, name=name, tag=tag, parties=parties, gc=self._remote_gc
)
timer.done(session.federation)
self._remote_gc.gc()
def get_parties(
self,
parties: Union[typing.List[Party], Party],
suffix: Union[typing.Any, typing.Tuple] = tuple(),
):
"""
get objects/tables from specified parties
Parameters
----------
parties: typing.List[Party]
            parties to get objects/tables from
suffix: str or tuple of str
            suffix used to distinguish federation within variable
Returns
-------
list
            a list of objects/tables got from parties, in the same order as ``parties``
"""
from fate_arch.session import get_session
session = get_session()
if not isinstance(parties, list):
parties = [parties]
if not isinstance(suffix, tuple):
suffix = (suffix,)
tag = FederationTagNamespace.generate_tag(*suffix)
for party in parties:
if party.role not in self._src:
raise RuntimeError(
f"not allowed to get object from {party} using {self._name}"
)
local = session.parties.local_party.role
if local not in self._dst:
raise RuntimeError(
f"not allowed to get object to {local} using {self._name}"
)
name = self._short_name if self._use_short_name else self._name
timer = profile.federation_get_timer(name, self._name, tag, local, parties)
rtn = session.federation.get(
name=name, tag=tag, parties=parties, gc=self._get_gc
)
timer.done(session.federation)
self._get_gc.gc()
return rtn
def remote(self, obj, role=None, idx=-1, suffix=tuple()):
"""
send obj to other parties.
Args:
obj: object to be sent
            role: role of parties to send to, use one of ['Host', 'Guest', 'Arbiter', None].
                The default is None, which means send values to parties regardless of their party role
            idx: id of party to send to.
                The default is -1, which means send values to parties regardless of their party id
suffix: additional tag suffix, the default is tuple()
"""
from fate_arch.session import get_parties
party_info = get_parties()
if idx >= 0 and role is None:
raise ValueError("role cannot be None if idx specified")
# get subset of dst roles in runtime conf
if role is None:
parties = party_info.roles_to_parties(self._dst, strict=False)
else:
if isinstance(role, str):
role = [role]
parties = party_info.roles_to_parties(role)
if idx >= 0:
if idx >= len(parties):
raise RuntimeError(
f"try to remote to {idx}th party while only {len(parties)} configurated: {parties}, check {self._name}"
)
parties = parties[idx]
return self.remote_parties(obj=obj, parties=parties, suffix=suffix)
def get(self, idx=-1, role=None, suffix=tuple()):
"""
get obj from other parties.
Args:
            idx: id of party to get from.
                The default is -1, which means get values from parties regardless of their party id
            role: role of parties to get from, use one of ['Host', 'Guest', 'Arbiter', None].
                The default is None, which means get values from parties regardless of their party role
suffix: additional tag suffix, the default is tuple()
Returns:
object or list of object
"""
from fate_arch.session import get_parties
if role is None:
src_parties = get_parties().roles_to_parties(roles=self._src, strict=False)
else:
if isinstance(role, str):
role = [role]
src_parties = get_parties().roles_to_parties(roles=role, strict=False)
if isinstance(idx, list):
rtn = self.get_parties(parties=[src_parties[i] for i in idx], suffix=suffix)
elif isinstance(idx, int):
if idx < 0:
rtn = self.get_parties(parties=src_parties, suffix=suffix)
else:
if idx >= len(src_parties):
raise RuntimeError(
f"try to get from {idx}th party while only {len(src_parties)} configurated: {src_parties}, check {self._name}"
)
rtn = self.get_parties(parties=src_parties[idx], suffix=suffix)[0]
else:
raise ValueError(
f"illegal idx type: {type(idx)}, supported types: int or list of int"
)
return rtn
class BaseTransferVariables(object):
def __init__(self, *args):
pass
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
@staticmethod
def set_flowid(flowid):
"""
set global namespace for federations.
Parameters
----------
flowid: str
namespace
Returns
-------
None
"""
FederationTagNamespace.set_namespace(str(flowid))
def _create_variable(
self, name: str, src: typing.Iterable[str], dst: typing.Iterable[str]
) -> Variable:
full_name = f"{self.__module__}.{self.__class__.__name__}.{name}"
return Variable.get_or_create(
full_name, lambda: Variable(name=full_name, src=tuple(src), dst=tuple(dst))
)
@staticmethod
def all_parties():
"""
get all parties
Returns
-------
list
list of parties
"""
from fate_arch.session import get_parties
return get_parties().all_parties
@staticmethod
def local_party():
"""
indicate local party
Returns
-------
Party
party this program running on
"""
from fate_arch.session import get_parties
return get_parties().local_party
| 10,532 | 29.354467 | 134 |
py
|
FATE
|
FATE-master/python/fate_arch/federation/__init__.py
|
from fate_arch.federation._type import FederationEngine
from fate_arch.federation._type import FederationDataType
__all__ = [
"FederationEngine",
"FederationDataType"
]
| 178 | 21.375 | 57 |
py
|
FATE
|
FATE-master/python/fate_arch/federation/_type.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class FederationEngine(object):
EGGROLL = 'EGGROLL'
RABBITMQ = 'RABBITMQ'
STANDALONE = 'STANDALONE'
PULSAR = 'PULSAR'
class FederationDataType(object):
OBJECT = "obj"
TABLE = "Table"
SPLIT_OBJECT = "split_obj"
| 858 | 28.62069 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/federation/standalone/_federation.py
|
import typing
from fate_arch._standalone import Federation as RawFederation, Table as RawTable
from fate_arch.abc import FederationABC
from fate_arch.abc import GarbageCollectionABC
from fate_arch.common import Party, log
from fate_arch.computing.standalone import Table
LOGGER = log.getLogger()
class Federation(FederationABC):
def __init__(self, standalone_session, federation_session_id, party):
LOGGER.debug(
f"[federation.standalone]init federation: "
f"standalone_session={standalone_session}, "
f"federation_session_id={federation_session_id}, "
f"party={party}"
)
self._session_id = federation_session_id
self._federation = RawFederation(
standalone_session, federation_session_id, party
)
LOGGER.debug("[federation.standalone]init federation context done")
@property
def session_id(self) -> str:
return self._session_id
def remote(
self,
v,
name: str,
tag: str,
parties: typing.List[Party],
gc: GarbageCollectionABC,
):
if not _remote_tag_not_duplicate(name, tag, parties):
raise ValueError(f"remote to {parties} with duplicate tag: {name}.{tag}")
if isinstance(v, Table):
# noinspection PyProtectedMember
v = v._table
return self._federation.remote(v=v, name=name, tag=tag, parties=parties)
# noinspection PyProtectedMember
def get(
self, name: str, tag: str, parties: typing.List[Party], gc: GarbageCollectionABC
) -> typing.List:
for party in parties:
if not _get_tag_not_duplicate(name, tag, party):
raise ValueError(f"get from {party} with duplicate tag: {name}.{tag}")
rtn = self._federation.get(name=name, tag=tag, parties=parties)
return [Table(r) if isinstance(r, RawTable) else r for r in rtn]
def destroy(self, parties):
self._federation.destroy()
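# a (name, tag, party) triple may be remoted/got at most once per process; these
# module-level sets guard against accidental reuse of the same tag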
_remote_history = set()
def _remote_tag_not_duplicate(name, tag, parties):
for party in parties:
if (name, tag, party) in _remote_history:
return False
_remote_history.add((name, tag, party))
return True
_get_history = set()
def _get_tag_not_duplicate(name, tag, party):
if (name, tag, party) in _get_history:
return False
_get_history.add((name, tag, party))
return True
| 2,442 | 29.5375 | 88 |
py
|
FATE
|
FATE-master/python/fate_arch/federation/standalone/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.federation.standalone._federation import Federation
__all__ = ['Federation']
| 710 | 34.55 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/federation/eggroll/_federation.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import concurrent.futures
import os
import signal
from enum import Enum
from eggroll.roll_pair.roll_pair import RollPair
from eggroll.roll_site.roll_site import RollSiteContext
from fate_arch.abc import FederationABC
from fate_arch.common.log import getLogger
from fate_arch.computing.eggroll import Table
from fate_arch.common import remote_status
LOGGER = getLogger()
class Federation(FederationABC):
def __init__(self, rp_ctx, rs_session_id, party, proxy_endpoint):
LOGGER.debug(
f"[federation.eggroll]init federation: "
f"rp_session_id={rp_ctx.session_id}, rs_session_id={rs_session_id}, "
f"party={party}, proxy_endpoint={proxy_endpoint}"
)
options = {
"self_role": party.role,
"self_party_id": party.party_id,
"proxy_endpoint": proxy_endpoint,
}
self._session_id = rs_session_id
self._rp_ctx = rp_ctx
self._rsc = RollSiteContext(rs_session_id, rp_ctx=rp_ctx, options=options)
LOGGER.debug(f"[federation.eggroll]init federation context done")
@property
def session_id(self) -> str:
return self._session_id
def get(self, name, tag, parties, gc):
parties = [(party.role, party.party_id) for party in parties]
raw_result = _get(name, tag, parties, self._rsc, gc)
return [Table(v) if isinstance(v, RollPair) else v for v in raw_result]
def remote(self, v, name, tag, parties, gc):
if isinstance(v, Table):
# noinspection PyProtectedMember
v = v._rp
parties = [(party.role, party.party_id) for party in parties]
_remote(v, name, tag, parties, self._rsc, gc)
def destroy(self, parties):
self._rp_ctx.cleanup(name="*", namespace=self._session_id)
def _remote(v, name, tag, parties, rsc, gc):
log_str = f"federation.eggroll.remote.{name}.{tag}{parties})"
if v is None:
raise ValueError(f"[{log_str}]remote `None` to {parties}")
if not _remote_tag_not_duplicate(name, tag, parties):
raise ValueError(f"[{log_str}]remote to {parties} with duplicate tag")
t = _get_type(v)
if t == _FederationValueType.ROLL_PAIR:
LOGGER.debug(
f"[{log_str}]remote "
f"RollPair(namespace={v.get_namespace()}, name={v.get_name()}, partitions={v.get_partitions()})"
)
gc.add_gc_action(tag, v, "destroy", {})
_push_with_exception_handle(rsc, v, name, tag, parties)
return
if t == _FederationValueType.OBJECT:
LOGGER.debug(f"[{log_str}]remote object with type: {type(v)}")
_push_with_exception_handle(rsc, v, name, tag, parties)
return
raise NotImplementedError(f"t={t}")
def _get(name, tag, parties, rsc, gc):
rs = rsc.load(name=name, tag=tag)
future_map = dict(zip(rs.pull(parties=parties), parties))
rtn = {}
for future in concurrent.futures.as_completed(future_map):
party = future_map[future]
v = future.result()
rtn[party] = _get_value_post_process(v, name, tag, party, gc)
return [rtn[party] for party in parties]
class _FederationValueType(Enum):
OBJECT = 1
ROLL_PAIR = 2
_remote_history = set()
def _remote_tag_not_duplicate(name, tag, parties):
for party in parties:
if (name, tag, party) in _remote_history:
return False
_remote_history.add((name, tag, party))
return True
def _get_type(v):
if isinstance(v, RollPair):
return _FederationValueType.ROLL_PAIR
return _FederationValueType.OBJECT
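# a failed asynchronous push is treated as fatal: the done-callback registered below
# logs the error and SIGTERMs the current process so the job fails fast instead of hanging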
def _push_with_exception_handle(rsc, v, name, tag, parties):
def _remote_exception_re_raise(f, p):
try:
f.result()
LOGGER.debug(
f"[federation.eggroll.remote.{name}.{tag}]future to remote to party: {p} done"
)
except Exception as e:
pid = os.getpid()
LOGGER.exception(
f"[federation.eggroll.remote.{name}.{tag}]future to remote to party: {p} fail,"
f" terminating process(pid={pid})"
)
import traceback
print(
f"federation.eggroll.remote.{name}.{tag} future to remote to party: {p} fail,"
f" terminating process {pid}, traceback: {traceback.format_exc()}"
)
os.kill(pid, signal.SIGTERM)
raise e
def _get_call_back_func(p):
def _callback(f):
return _remote_exception_re_raise(f, p)
return _callback
rs = rsc.load(name=name, tag=tag)
futures = rs.push(obj=v, parties=parties)
for party, future in zip(parties, futures):
future.add_done_callback(_get_call_back_func(party))
remote_status.add_remote_futures(futures)
return rs
_get_history = set()
def _get_tag_not_duplicate(name, tag, party):
if (name, tag, party) in _get_history:
return False
_get_history.add((name, tag, party))
return True
def _get_value_post_process(v, name, tag, party, gc):
log_str = f"federation.eggroll.get.{name}.{tag}"
if v is None:
raise ValueError(f"[{log_str}]get `None` from {party}")
if not _get_tag_not_duplicate(name, tag, party):
raise ValueError(f"[{log_str}]get from {party} with duplicate tag")
# got a roll pair
if isinstance(v, RollPair):
LOGGER.debug(
f"[{log_str}] got "
f"RollPair(namespace={v.get_namespace()}, name={v.get_name()}, partitions={v.get_partitions()})"
)
gc.add_gc_action(tag, v, "destroy", {})
return v
# others
LOGGER.debug(f"[{log_str}] got object with type: {type(v)}")
return v
| 6,318 | 31.740933 | 108 |
py
|
FATE
|
FATE-master/python/fate_arch/federation/eggroll/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.federation.eggroll._federation import Federation
__all__ = ['Federation']
| 707 | 34.4 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/federation/pulsar/_federation.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.common import Party
from fate_arch.common import file_utils
from fate_arch.common.log import getLogger
from fate_arch.federation._federation import FederationBase
from fate_arch.federation.pulsar._mq_channel import (
MQChannel,
DEFAULT_TENANT,
DEFAULT_CLUSTER,
DEFAULT_SUBSCRIPTION_NAME,
)
from fate_arch.federation.pulsar._pulsar_manager import PulsarManager
LOGGER = getLogger()
# default message max size in bytes (104857, roughly 100 KB)
DEFAULT_MESSAGE_MAX_SIZE = 104857
class MQ(object):
def __init__(self, host, port, route_table):
self.host = host
self.port = port
self.route_table = route_table
def __str__(self):
return (
f"MQ(host={self.host}, port={self.port} "
f"route_table={self.route_table}), "
f"type=pulsar"
)
def __repr__(self):
return self.__str__()
class _TopicPair(object):
def __init__(self, tenant, namespace, send, receive):
self.tenant = tenant
self.namespace = namespace
self.send = send
self.receive = receive
class Federation(FederationBase):
@staticmethod
def from_conf(
federation_session_id: str,
party: Party,
runtime_conf: dict,
**kwargs
):
pulsar_config = kwargs["pulsar_config"]
LOGGER.debug(f"pulsar_config: {pulsar_config}")
host = pulsar_config.get("host", "localhost")
port = pulsar_config.get("port", "6650")
mng_port = pulsar_config.get("mng_port", "8080")
topic_ttl = int(pulsar_config.get("topic_ttl", 0))
cluster = pulsar_config.get("cluster", DEFAULT_CLUSTER)
# tenant name should be unified between parties
tenant = pulsar_config.get("tenant", DEFAULT_TENANT)
# max_message_size;
max_message_size = int(pulsar_config.get("max_message_size", DEFAULT_MESSAGE_MAX_SIZE))
pulsar_run = runtime_conf.get(
"job_parameters", {}).get("pulsar_run", {})
LOGGER.debug(f"pulsar_run: {pulsar_run}")
max_message_size = int(pulsar_run.get(
"max_message_size", max_message_size))
LOGGER.debug(f"set max message size to {max_message_size} Bytes")
# topic ttl could be overwritten by run time config
topic_ttl = int(pulsar_run.get("topic_ttl", topic_ttl))
        # pulsar does not use user and password so far
# TODO add credential to connections
base_user = pulsar_config.get("user")
base_password = pulsar_config.get("password")
mode = pulsar_config.get("mode", "replication")
pulsar_manager = PulsarManager(
host=host, port=mng_port, runtime_config=pulsar_run
)
# init tenant
tenant_info = pulsar_manager.get_tenant(tenant=tenant).json()
if tenant_info.get("allowedClusters") is None:
pulsar_manager.create_tenant(
tenant=tenant, admins=[], clusters=[cluster])
route_table_path = pulsar_config.get("route_table")
if route_table_path is None:
route_table_path = "conf/pulsar_route_table.yaml"
route_table = file_utils.load_yaml_conf(conf_path=route_table_path)
mq = MQ(host, port, route_table)
conf = pulsar_manager.runtime_config.get(
"connection", {}
)
LOGGER.debug(f"federation mode={mode}")
return Federation(
federation_session_id,
party,
mq,
pulsar_manager,
max_message_size,
topic_ttl,
cluster,
tenant,
conf,
mode
)
def __init__(self, session_id, party: Party, mq: MQ, pulsar_manager: PulsarManager, max_message_size, topic_ttl,
cluster, tenant, conf, mode):
super().__init__(session_id=session_id, party=party, mq=mq, max_message_size=max_message_size, conf=conf)
self._pulsar_manager = pulsar_manager
self._topic_ttl = topic_ttl
self._cluster = cluster
self._tenant = tenant
self._mode = mode
def __getstate__(self):
pass
def destroy(self, parties):
        # The ideal cleanup strategy is to consume all messages in the topics,
        # and let the pulsar cluster collect the used topics.
LOGGER.debug("[pulsar.cleanup]start to cleanup...")
# 1. remove subscription
response = self._pulsar_manager.unsubscribe_namespace_all_topics(
tenant=self._tenant,
namespace=self._session_id,
subscription_name=DEFAULT_SUBSCRIPTION_NAME,
)
if response.ok:
LOGGER.debug("successfully unsubscribe all topics")
else:
LOGGER.error(response.text)
# 2. reset retention policy
response = self._pulsar_manager.set_retention(
self._tenant,
self._session_id,
retention_time_in_minutes=0,
retention_size_in_MB=0,
)
if response.ok:
LOGGER.debug("successfully reset all retention policy")
else:
LOGGER.error(response.text)
# 3. remove cluster from namespace
response = self._pulsar_manager.set_clusters_to_namespace(
self._tenant, self._session_id, [self._cluster]
)
if response.ok:
LOGGER.debug("successfully reset all replicated cluster")
else:
LOGGER.error(response.text)
# # 4. remove namespace
# response = self._pulsar_manager.delete_namespace(
# self._tenant, self._session_id
# )
# if response.ok:
# LOGGER.debug(f"successfully delete namespace={self._session_id}")
# else:
# LOGGER.error(response.text)
def _maybe_create_topic_and_replication(self, party, topic_suffix):
if self._mode == "replication":
return self._create_topic_by_replication_mode(party, topic_suffix)
if self._mode == "client":
return self._create_topic_by_client_mode(party, topic_suffix)
raise ValueError("mode={self._mode} is not support!")
def _create_topic_by_client_mode(self, party, topic_suffix):
send_topic_name = f"{self._party.role}-{self._party.party_id}-{party.role}-{party.party_id}-{topic_suffix}"
receive_topic_name = f"{party.role}-{party.party_id}-{self._party.role}-{self._party.party_id}-{topic_suffix}"
        # topic_pair is a pair of topics for sending and receiving messages respectively
topic_pair = _TopicPair(
tenant=self._tenant,
namespace=self._session_id,
send=send_topic_name,
receive=receive_topic_name,
)
# init pulsar namespace
namespaces = self._pulsar_manager.get_namespace(
self._tenant).json()
# create namespace
if f"{self._tenant}/{self._session_id}" not in namespaces:
# append target cluster to the pulsar namespace
code = self._pulsar_manager.create_namespace(
self._tenant, self._session_id
).status_code
# according to https://pulsar.apache.org/admin-rest-api/?version=2.7.0&apiversion=v2#operation/getPolicies
# return 409 if existed
# return 204 if ok
if code == 204 or code == 409:
LOGGER.debug(
"successfully create pulsar namespace: %s", self._session_id
)
else:
raise Exception(
"unable to create pulsar namespace with status code: {}".format(
code
)
)
# set message ttl for the namespace
response = self._pulsar_manager.set_retention(
self._tenant,
self._session_id,
retention_time_in_minutes=int(self._topic_ttl),
retention_size_in_MB=-1,
)
LOGGER.debug(response.text)
if response.ok:
LOGGER.debug(
"successfully set message ttl to namespace: {} about {} mintues".format(
self._session_id, self._topic_ttl
)
)
else:
LOGGER.debug("failed to set message ttl to namespace")
return topic_pair
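    # replication mode registers the remote party as a pulsar cluster, adds it to the
    # tenant and enables geo-replication on the session namespace, so messages published
    # locally are mirrored to the peer's broker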
def _create_topic_by_replication_mode(self, party, topic_suffix):
send_topic_name = f"{self._party.role}-{self._party.party_id}-{party.role}-{party.party_id}-{topic_suffix}"
receive_topic_name = f"{party.role}-{party.party_id}-{self._party.role}-{self._party.party_id}-{topic_suffix}"
        # topic_pair is a pair of topics for sending and receiving messages respectively
topic_pair = _TopicPair(
tenant=self._tenant,
namespace=self._session_id,
send=send_topic_name,
receive=receive_topic_name,
)
if party.party_id == self._party.party_id:
LOGGER.debug(
"connecting to local broker, skipping cluster creation"
)
else:
# init pulsar cluster
cluster = self._pulsar_manager.get_cluster(
party.party_id).json()
if (
cluster.get("brokerServiceUrl", "") == ""
and cluster.get("brokerServiceUrlTls", "") == ""
):
LOGGER.debug(
"pulsar cluster with name %s does not exist or broker url is empty, creating...",
party.party_id,
)
remote_party = self._mq.route_table.get(
int(party.party_id), None
)
# handle party does not exist in route table first
if remote_party is None:
domain = self._mq.route_table.get(
"default").get("domain")
host = f"{party.party_id}.{domain}"
port = self._mq.route_table.get("default").get(
"brokerPort", "6650"
)
sslPort = self._mq.route_table.get("default").get(
"brokerSslPort", "6651"
)
proxy = self._mq.route_table.get(
"default").get("proxy", "")
# fetch party info from the route table
else:
host = self._mq.route_table.get(int(party.party_id)).get(
"host"
)
port = self._mq.route_table.get(int(party.party_id)).get(
"port", "6650"
)
sslPort = self._mq.route_table.get(int(party.party_id)).get(
"sslPort", "6651"
)
proxy = self._mq.route_table.get(int(party.party_id)).get(
"proxy", ""
)
broker_url = f"pulsar://{host}:{port}"
broker_url_tls = f"pulsar+ssl://{host}:{sslPort}"
if proxy != "":
proxy = f"pulsar+ssl://{proxy}"
if self._pulsar_manager.create_cluster(
cluster_name=party.party_id,
broker_url=broker_url,
broker_url_tls=broker_url_tls,
proxy_url=proxy,
).ok:
LOGGER.debug(
"pulsar cluster with name: %s, broker_url: %s created",
party.party_id,
broker_url,
)
elif self._pulsar_manager.update_cluster(
cluster_name=party.party_id,
broker_url=broker_url,
broker_url_tls=broker_url_tls,
proxy_url=proxy,
).ok:
LOGGER.debug(
"pulsar cluster with name: %s, broker_url: %s updated",
party.party_id,
broker_url,
)
else:
                    error_message = (
                        "unable to create pulsar cluster: {}".format(
                            party.party_id
                        )
                    )
LOGGER.error(error_message)
# just leave this alone.
raise Exception(error_message)
# update tenant
tenant_info = self._pulsar_manager.get_tenant(
self._tenant).json()
if party.party_id not in tenant_info["allowedClusters"]:
tenant_info["allowedClusters"].append(party.party_id)
if self._pulsar_manager.update_tenant(
self._tenant,
tenant_info.get("admins", []),
tenant_info.get(
"allowedClusters",
),
).ok:
LOGGER.debug(
"successfully update tenant with cluster: %s",
party.party_id,
)
else:
raise Exception("unable to update tenant")
# TODO: remove this for the loop
# init pulsar namespace
namespaces = self._pulsar_manager.get_namespace(
self._tenant).json()
# create namespace
if f"{self._tenant}/{self._session_id}" not in namespaces:
# append target cluster to the pulsar namespace
clusters = [self._cluster]
if (
party.party_id != self._party.party_id
and party.party_id not in clusters
):
clusters.append(party.party_id)
policy = {"replication_clusters": clusters}
code = self._pulsar_manager.create_namespace(
self._tenant, self._session_id, policies=policy
).status_code
# according to https://pulsar.apache.org/admin-rest-api/?version=2.7.0&apiversion=v2#operation/getPolicies
# return 409 if existed
# return 204 if ok
if code == 204 or code == 409:
LOGGER.debug(
"successfully create pulsar namespace: %s", self._session_id
)
else:
raise Exception(
"unable to create pulsar namespace with status code: {}".format(
code
)
)
# set message ttl for the namespace
response = self._pulsar_manager.set_retention(
self._tenant,
self._session_id,
retention_time_in_minutes=int(self._topic_ttl),
retention_size_in_MB=-1,
)
LOGGER.debug(response.text)
if response.ok:
LOGGER.debug(
"successfully set message ttl to namespace: {} about {} mintues".format(
self._session_id, self._topic_ttl
)
)
else:
LOGGER.debug("failed to set message ttl to namespace")
# update party to namespace
else:
if party.party_id != self._party.party_id:
clusters = self._pulsar_manager.get_cluster_from_namespace(
self._tenant, self._session_id
).json()
if party.party_id not in clusters:
clusters.append(party.party_id)
if self._pulsar_manager.set_clusters_to_namespace(
self._tenant, self._session_id, clusters
).ok:
LOGGER.debug(
"successfully set clusters: {} to pulsar namespace: {}".format(
clusters, self._session_id
)
)
else:
raise Exception(
"unable to update clusters: {} to pulsar namespaces: {}".format(
clusters, self._session_id
)
)
return topic_pair
def _get_channel(self, topic_pair: _TopicPair, src_party_id, src_role, dst_party_id, dst_role, mq=None,
conf: dict = None):
return MQChannel(
host=mq.host,
port=mq.port,
tenant=topic_pair.tenant,
namespace=topic_pair.namespace,
send_topic=topic_pair.send,
receive_topic=topic_pair.receive,
src_party_id=src_party_id,
src_role=src_role,
dst_party_id=dst_party_id,
dst_role=dst_role,
credential=None,
extra_args=conf,
)
def _get_consume_message(self, channel_info):
while True:
message = channel_info.consume()
body = message.data()
properties = message.properties()
message_id = message.message_id()
yield message_id, properties, body
def _consume_ack(self, channel_info, id):
channel_info.ack(message=id)
| 18,057 | 36.857442 | 118 |
py
|
FATE
|
FATE-master/python/fate_arch/federation/pulsar/_pulsar_manager.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import json
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from fate_arch.common.log import getLogger
logger = getLogger()
MAX_RETRIES = 10
MAX_REDIRECT = 5
BACKOFF_FACTOR = 1
# sleep time equals BACKOFF_FACTOR * (2 ** (number_of_total_retries - 1))
CLUSTER = 'clusters/{}'
TENANT = 'tenants/{}'
# APIs are refer to https://pulsar.apache.org/admin-rest-api/?version=2.7.0&apiversion=v2
class PulsarManager():
def __init__(self, host: str, port: str, runtime_config: dict = {}):
self.service_url = "http://{}:{}/admin/v2/".format(host, port)
self.runtime_config = runtime_config
# create session is used to construct url and request parameters
def _create_session(self):
# retry mechanism refers to
# https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#urllib3.util.Retry
retry = Retry(total=MAX_RETRIES, redirect=MAX_REDIRECT,
backoff_factor=BACKOFF_FACTOR)
s = requests.Session()
# initialize headers
s.headers.update({'Content-Type': 'application/json'})
http_adapter = HTTPAdapter(max_retries=retry)
s.mount('http://', http_adapter)
s.mount('https://', http_adapter)
return s
# allocator
def get_allocator(self, allocator: str = 'default'):
session = self._create_session()
response = session.get(
self.service_url + 'broker-stats/allocator-stats/{}'.format(allocator))
return response
# cluster
def get_cluster(self, cluster_name: str = ''):
session = self._create_session()
response = session.get(
self.service_url + CLUSTER.format(cluster_name))
return response
def delete_cluster(self, cluster_name: str = ''):
session = self._create_session()
response = session.delete(
self.service_url + CLUSTER.format(cluster_name))
return response
# service_url need to provide "http://" prefix
def create_cluster(self, cluster_name: str, broker_url: str, service_url: str = '',
service_url_tls: str = '', broker_url_tls: str = '',
proxy_url: str = '', proxy_protocol: str = "SNI", peer_cluster_names: list = [],
):
# initialize data
data = {
'serviceUrl': service_url,
'serviceUrlTls': service_url_tls,
'brokerServiceUrl': broker_url,
'brokerServiceUrlTls': broker_url_tls,
'peerClusterNames': peer_cluster_names,
'proxyServiceUrl': proxy_url,
'proxyProtocol': proxy_protocol
}
session = self._create_session()
response = session.put(
self.service_url + CLUSTER.format(cluster_name), data=json.dumps(data))
return response
def update_cluster(self, cluster_name: str, broker_url: str, service_url: str = '',
service_url_tls: str = '', broker_url_tls: str = '',
proxy_url: str = '', proxy_protocol: str = "SNI", peer_cluster_names: list = [],
):
# initialize data
data = {
'serviceUrl': service_url,
'serviceUrlTls': service_url_tls,
'brokerServiceUrl': broker_url,
'brokerServiceUrlTls': broker_url_tls,
'peerClusterNames': peer_cluster_names,
'proxyServiceUrl': proxy_url,
'proxyProtocol': proxy_protocol
}
session = self._create_session()
response = session.post(
self.service_url + CLUSTER.format(cluster_name), data=json.dumps(data))
return response
# tenants
def get_tenant(self, tenant: str = ''):
session = self._create_session()
response = session.get(self.service_url + TENANT.format(tenant))
return response
def create_tenant(self, tenant: str, admins: list, clusters: list):
session = self._create_session()
data = {'adminRoles': admins,
'allowedClusters': clusters}
response = session.put(
self.service_url + TENANT.format(tenant), data=json.dumps(data))
return response
def delete_tenant(self, tenant: str):
session = self._create_session()
response = session.delete(
self.service_url + TENANT.format(tenant))
return response
def update_tenant(self, tenant: str, admins: list, clusters: list):
session = self._create_session()
data = {'adminRoles': admins,
'allowedClusters': clusters}
response = session.post(
self.service_url + TENANT.format(tenant), data=json.dumps(data))
return response
# namespace
def get_namespace(self, tenant: str):
session = self._create_session()
response = session.get(
self.service_url + 'namespaces/{}'.format(tenant))
return response
# 'replication_clusters' is always required
def create_namespace(self, tenant: str, namespace: str, policies: dict = {}):
session = self._create_session()
response = session.put(
self.service_url + 'namespaces/{}/{}'.format(tenant, namespace),
data=json.dumps(policies)
)
return response
def delete_namespace(self, tenant: str, namespace: str):
session = self._create_session()
response = session.delete(
self.service_url +
'namespaces/{}/{}'.format(tenant, namespace)
)
return response
def set_clusters_to_namespace(self, tenant: str, namespace: str, clusters: list):
session = self._create_session()
response = session.post(
self.service_url + 'namespaces/{}/{}/replication'.format(tenant, namespace), json=clusters
)
return response
def get_cluster_from_namespace(self, tenant: str, namespace: str):
session = self._create_session()
response = session.get(
self.service_url +
'namespaces/{}/{}/replication'.format(tenant, namespace)
)
return response
def set_subscription_expiration_time(self, tenant: str, namespace: str, mintues: int = 0):
session = self._create_session()
response = session.post(
self.service_url + 'namespaces/{}/{}/subscriptionExpirationTime'.format(tenant, namespace), json=mintues
)
return response
def set_message_ttl(self, tenant: str, namespace: str, mintues: int = 0):
session = self._create_session()
response = session.post(
# the API accepts data as seconds
self.service_url + 'namespaces/{}/{}/messageTTL'.format(tenant, namespace), json=mintues * 60
)
return response
def unsubscribe_namespace_all_topics(self, tenant: str, namespace: str, subscription_name: str):
session = self._create_session()
response = session.post(
self.service_url +
'namespaces/{}/{}/unsubscribe/{}'.format(
tenant, namespace, subscription_name)
)
return response
def set_retention(self, tenant: str, namespace: str,
retention_time_in_minutes: int = 0, retention_size_in_MB: int = 0):
session = self._create_session()
data = {'retentionTimeInMinutes': retention_time_in_minutes,
'retentionSizeInMB': retention_size_in_MB}
response = session.post(
self.service_url +
'namespaces/{}/{}/retention'.format(tenant, namespace), data=json.dumps(data)
)
return response
def remove_retention(self, tenant: str, namespace: str):
session = self._create_session()
response = session.delete(
self.service_url +
'namespaces/{}/{}/retention'.format(tenant, namespace),
)
return response
# topic
def unsubscribe_topic(self, tenant: str, namespace: str, topic: str, subscription_name: str):
session = self._create_session()
response = session.delete(
self.service_url +
'persistent/{}/{}/{}/subscription/{}'.format(
tenant, namespace, topic, subscription_name)
)
return response
| 9,002 | 34.167969 | 116 |
py
|
FATE
|
FATE-master/python/fate_arch/federation/pulsar/__init__.py
|
from fate_arch.federation.pulsar._federation import Federation, MQ, PulsarManager
__all__ = ['Federation', 'MQ', 'PulsarManager']
| 132 | 25.6 | 81 |
py
|
FATE
|
FATE-master/python/fate_arch/federation/pulsar/_mq_channel.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pulsar
from fate_arch.common import log
from fate_arch.federation._nretry import nretry
LOGGER = log.getLogger()
CHANNEL_TYPE_PRODUCER = "producer"
CHANNEL_TYPE_CONSUMER = "consumer"
DEFAULT_TENANT = "fl-tenant"
DEFAULT_CLUSTER = "standalone"
TOPIC_PREFIX = "{}/{}/{}"
UNIQUE_PRODUCER_NAME = "unique_producer"
UNIQUE_CONSUMER_NAME = "unique_consumer"
DEFAULT_SUBSCRIPTION_NAME = "unique"
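# produce/consume/ack/cancel are wrapped with @nretry so transient broker errors are
# retried; the pulsar client and its producer/consumer are created lazily inside each
# call and rebuilt when the connection has gone away.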
# A channel should only be able to either send or receive messages.
class MQChannel(object):
# TODO add credential to secure pulsar cluster
def __init__(
self,
host,
port,
tenant,
namespace,
send_topic,
receive_topic,
src_party_id,
src_role,
dst_party_id,
dst_role,
credential=None,
extra_args: dict = None,
):
# "host:port" is used to connect the pulsar broker
self._host = host
self._port = port
self._tenant = tenant
self._namespace = namespace
self._send_topic = send_topic
self._receive_topic = receive_topic
self._credential = credential
self._src_party_id = src_party_id
self._src_role = src_role
self._dst_party_id = dst_party_id
self._dst_role = dst_role
self._extra_args = extra_args
# "_channel" is the subscriptor for the topic
self._producer_send = None
self._producer_conn = None
self._consumer_receive = None
self._consumer_conn = None
self._sequence_id = None
# these are pulsar message id
self._latest_confirmed = None
self._first_confirmed = None
self._subscription_config = {}
if self._extra_args.get("subscription") is not None:
self._subscription_config.update(self._extra_args["subscription"])
self._producer_config = {}
if self._extra_args.get("producer") is not None:
self._producer_config.update(self._extra_args["producer"])
self._consumer_config = {}
if self._extra_args.get("consumer") is not None:
self._consumer_config.update(self._extra_args["consumer"])
    # split the creation of producer and consumer to avoid wasting resources
@nretry
def produce(self, body, properties):
self._get_or_create_producer()
LOGGER.debug("send queue: {}".format(self._producer_send.topic()))
LOGGER.debug("send data size: {}".format(len(body)))
message_id = self._producer_send.send(
content=body, properties=properties)
if message_id is None:
raise Exception("publish failed")
self._sequence_id = message_id
@nretry
def consume(self):
self._get_or_create_consumer()
try:
LOGGER.debug("receive topic: {}".format(
self._consumer_receive.topic()))
receive_timeout = self._consumer_config.get(
'receive_timeout_millis', None)
if receive_timeout is not None:
LOGGER.debug(
f"receive timeout millis {receive_timeout}")
message = self._consumer_receive.receive(
timeout_millis=receive_timeout)
return message
except Exception:
self._consumer_receive.seek(pulsar.MessageId.earliest)
raise TimeoutError("meet receive timeout, try to reset the cursor")
@nretry
def ack(self, message):
# assume consumer is alive
try:
self._consumer_receive.acknowledge(message)
self._latest_confirmed = message
if self._first_confirmed is None:
self._first_confirmed = message
except Exception as e:
LOGGER.debug("meet {} when trying to ack message".format(e))
self._get_or_create_consumer()
self._consumer_receive.negative_acknowledge(message)
@nretry
def unack_all(self):
self._get_or_create_consumer()
self._consumer_receive.seek(pulsar.MessageId.earliest)
@nretry
def cancel(self):
if self._consumer_conn is not None:
try:
self._consumer_receive.close()
self._consumer_conn.close()
except Exception as e:
LOGGER.debug("meet {} when trying to close consumer".format(e))
self._consumer_receive = None
self._consumer_conn = None
if self._producer_conn is not None:
try:
self._producer_send.close()
self._producer_conn.close()
except Exception as e:
LOGGER.debug("meet {} when trying to close producer".format(e))
self._producer_send = None
self._producer_conn = None
def _get_or_create_producer(self):
        if not self._check_producer_alive():
# if self._producer_conn is None:
try:
self._producer_conn = pulsar.Client(
service_url="pulsar://{}:{}".format(
self._host, self._port),
operation_timeout_seconds=30,
)
except Exception as e:
self._producer_conn = None
            # always use the current client to fetch the producer
try:
self._producer_send = self._producer_conn.create_producer(
TOPIC_PREFIX.format(
self._tenant, self._namespace, self._send_topic
),
producer_name=UNIQUE_PRODUCER_NAME,
send_timeout_millis=60000,
max_pending_messages=500,
compression_type=pulsar.CompressionType.LZ4,
**self._producer_config,
)
except Exception as e:
LOGGER.debug(
f"catch exception {e} in creating pulsar producer")
self._producer_conn = None
def _get_or_create_consumer(self):
if not self._check_consumer_alive():
try:
self._consumer_conn = pulsar.Client(
service_url="pulsar://{}:{}".format(
self._host, self._port),
operation_timeout_seconds=30,
)
except Exception:
self._consumer_conn = None
try:
self._consumer_receive = self._consumer_conn.subscribe(
TOPIC_PREFIX.format(
self._tenant, self._namespace, self._receive_topic
),
subscription_name=DEFAULT_SUBSCRIPTION_NAME,
consumer_name=UNIQUE_CONSUMER_NAME,
initial_position=pulsar.InitialPosition.Earliest,
replicate_subscription_state_enabled=True,
**self._subscription_config,
)
# set cursor to latest confirmed
if self._latest_confirmed is not None:
self._consumer_receive.seek(self._latest_confirmed)
except Exception as e:
LOGGER.debug(
f"catch exception {e} in creating pulsar consumer")
                if self._consumer_conn is not None:
                    self._consumer_conn.close()
                self._consumer_conn = None
def _check_producer_alive(self):
if self._producer_conn is None or self._producer_send is None:
return False
try:
self._producer_conn.get_topic_partitions("test-alive")
self._producer_send.flush()
return True
except Exception as e:
LOGGER.debug("catch {}, closing producer client".format(e))
if self._producer_conn is not None:
try:
self._producer_conn.close()
except Exception:
pass
self._producer_conn = None
self._producer_send = None
return False
def _check_consumer_alive(self):
try:
if self._latest_confirmed is not None:
self._consumer_receive.acknowledge(self._latest_confirmed)
return True
else:
return False
except Exception as e:
self._consumer_conn = None
self._consumer_receive = None
return False
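# --- illustrative sketch (added; not part of the original module) ---
# Assuming `channel` is an already-constructed instance of the Pulsar channel class
# above, a typical receive loop pairs consume() with ack() so the confirmed cursor
# only advances after the payload has been handled; `handle` is a hypothetical
# callable processing (bytes, properties).
def _example_drain(channel, handle):
    while True:
        msg = channel.consume()
        handle(msg.data(), msg.properties())
        channel.ack(msg)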
| 9,061 | 33.853846 | 79 | py |
| FATE | FATE-master/python/fate_arch/federation/rabbitmq/_federation.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from fate_arch.common import Party
from fate_arch.common import file_utils
from fate_arch.common.log import getLogger
from fate_arch.federation._federation import FederationBase
from fate_arch.federation.rabbitmq._mq_channel import MQChannel
from fate_arch.federation.rabbitmq._rabbit_manager import RabbitManager
LOGGER = getLogger()
# default message max size in bytes = 1MB
DEFAULT_MESSAGE_MAX_SIZE = 1048576
class MQ(object):
def __init__(self, host, port, union_name, policy_id, route_table):
self.host = host
self.port = port
self.union_name = union_name
self.policy_id = policy_id
self.route_table = route_table
def __str__(self):
return (
f"MQ(host={self.host}, port={self.port}, union_name={self.union_name}, "
f"policy_id={self.policy_id}, route_table={self.route_table})"
)
def __repr__(self):
return self.__str__()
class _TopicPair(object):
def __init__(self, tenant=None, namespace=None, vhost=None, send=None, receive=None):
self.tenant = tenant
self.namespace = namespace
self.vhost = vhost
self.send = send
self.receive = receive
class Federation(FederationBase):
@staticmethod
def from_conf(
federation_session_id: str,
party: Party,
runtime_conf: dict,
**kwargs
):
rabbitmq_config = kwargs["rabbitmq_config"]
LOGGER.debug(f"rabbitmq_config: {rabbitmq_config}")
host = rabbitmq_config.get("host")
port = rabbitmq_config.get("port")
mng_port = rabbitmq_config.get("mng_port")
base_user = rabbitmq_config.get("user")
base_password = rabbitmq_config.get("password")
mode = rabbitmq_config.get("mode", "replication")
# max_message_size;
max_message_size = int(rabbitmq_config.get("max_message_size", DEFAULT_MESSAGE_MAX_SIZE))
union_name = federation_session_id
policy_id = federation_session_id
rabbitmq_run = runtime_conf.get("job_parameters", {}).get("rabbitmq_run", {})
LOGGER.debug(f"rabbitmq_run: {rabbitmq_run}")
max_message_size = int(rabbitmq_run.get(
"max_message_size", max_message_size))
LOGGER.debug(f"set max message size to {max_message_size} Bytes")
rabbit_manager = RabbitManager(
base_user, base_password, f"{host}:{mng_port}", rabbitmq_run
)
rabbit_manager.create_user(union_name, policy_id)
route_table_path = rabbitmq_config.get("route_table")
if route_table_path is None:
route_table_path = "conf/rabbitmq_route_table.yaml"
route_table = file_utils.load_yaml_conf(conf_path=route_table_path)
mq = MQ(host, port, union_name, policy_id, route_table)
conf = rabbit_manager.runtime_config.get(
"connection", {}
)
return Federation(
federation_session_id, party, mq, rabbit_manager, max_message_size, conf, mode
)
def __init__(self, session_id, party: Party, mq: MQ, rabbit_manager: RabbitManager, max_message_size, conf, mode):
super().__init__(session_id=session_id, party=party, mq=mq, max_message_size=max_message_size, conf=conf)
self._rabbit_manager = rabbit_manager
self._vhost_set = set()
self._mode = mode
def __getstate__(self):
pass
def destroy(self, parties):
LOGGER.debug("[rabbitmq.cleanup]start to cleanup...")
for party in parties:
if self._party == party:
continue
vhost = self._get_vhost(party)
LOGGER.debug(f"[rabbitmq.cleanup]start to cleanup vhost {vhost}...")
self._rabbit_manager.clean(vhost)
LOGGER.debug(f"[rabbitmq.cleanup]cleanup vhost {vhost} done")
if self._mq.union_name:
LOGGER.debug(f"[rabbitmq.cleanup]clean user {self._mq.union_name}.")
self._rabbit_manager.delete_user(user=self._mq.union_name)
def _get_vhost(self, party):
low, high = (
(self._party, party) if self._party < party else (party, self._party)
)
vhost = (
f"{self._session_id}-{low.role}-{low.party_id}-{high.role}-{high.party_id}"
)
return vhost
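    # Note (added): both sides derive the same vhost name because the pair is ordered
    # by Party comparison before formatting; with hypothetical ids this looks like
    # "<session_id>-guest-9999-host-10000".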
def _maybe_create_topic_and_replication(self, party, topic_suffix):
if self._mode == "replication":
return self._create_topic_by_replication_mode(party, topic_suffix)
if self._mode == "client":
return self._create_topic_by_client_mode(party, topic_suffix)
        raise ValueError(f"mode={self._mode} is not supported!")
def _create_topic_by_client_mode(self, party, topic_suffix):
# gen names
vhost_name = self._get_vhost(party)
send_queue_name = f"{self._session_id}-{self._party.role}-{self._party.party_id}-{party.role}-{party.party_id}-{topic_suffix}"
receive_queue_name = f"{self._session_id}-{party.role}-{party.party_id}-{self._party.role}-{self._party.party_id}-{topic_suffix}"
topic_pair = _TopicPair(
namespace=self._session_id,
vhost=vhost_name,
send=send_queue_name,
receive=receive_queue_name
)
        # initialize the vhost
if topic_pair.vhost not in self._vhost_set:
self._rabbit_manager.create_vhost(topic_pair.vhost)
self._rabbit_manager.add_user_to_vhost(
self._mq.union_name, topic_pair.vhost
)
self._vhost_set.add(topic_pair.vhost)
        # initialize the send queue
        self._rabbit_manager.create_queue(topic_pair.vhost, topic_pair.send)
        # initialize the receive queue
        self._rabbit_manager.create_queue(topic_pair.vhost, topic_pair.receive)
return topic_pair
def _create_topic_by_replication_mode(self, party, topic_suffix):
# gen names
vhost_name = self._get_vhost(party)
send_queue_name = f"send-{self._session_id}-{self._party.role}-{self._party.party_id}-{party.role}-{party.party_id}-{topic_suffix}"
receive_queue_name = f"receive-{self._session_id}-{party.role}-{party.party_id}-{self._party.role}-{self._party.party_id}-{topic_suffix}"
topic_pair = _TopicPair(
namespace=self._session_id,
vhost=vhost_name,
send=send_queue_name,
receive=receive_queue_name
)
        # initialize the vhost
if topic_pair.vhost not in self._vhost_set:
self._rabbit_manager.create_vhost(topic_pair.vhost)
self._rabbit_manager.add_user_to_vhost(
self._mq.union_name, topic_pair.vhost
)
self._vhost_set.add(topic_pair.vhost)
        # initialize the send queue (name prefixed with "send-")
        self._rabbit_manager.create_queue(topic_pair.vhost, topic_pair.send)
        # initialize the receive queue (name prefixed with "receive-")
self._rabbit_manager.create_queue(
topic_pair.vhost, topic_pair.receive
)
upstream_uri = self._upstream_uri(party_id=party.party_id)
self._rabbit_manager.federate_queue(
upstream_host=upstream_uri,
vhost=topic_pair.vhost,
send_queue_name=topic_pair.send,
receive_queue_name=topic_pair.receive,
)
return topic_pair
def _upstream_uri(self, party_id):
host = self._mq.route_table.get(int(party_id)).get("host")
port = self._mq.route_table.get(int(party_id)).get("port")
upstream_uri = (
f"amqp://{self._mq.union_name}:{self._mq.policy_id}@{host}:{port}"
)
return upstream_uri
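    # Note (added): route_table is expected to map a party id to its connection info,
    # e.g. {10000: {"host": "192.168.0.1", "port": 5672}} (hypothetical values), which
    # yields "amqp://<union_name>:<policy_id>@192.168.0.1:5672".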
def _get_channel(
self, topic_pair, src_party_id, src_role, dst_party_id, dst_role, mq=None, conf: dict = None):
LOGGER.debug(f"rabbitmq federation _get_channel, src_party_id={src_party_id}, src_role={src_role},"
f"dst_party_id={dst_party_id}, dst_role={dst_role}")
return MQChannel(
host=mq.host,
port=mq.port,
user=mq.union_name,
password=mq.policy_id,
namespace=topic_pair.namespace,
vhost=topic_pair.vhost,
send_queue_name=topic_pair.send,
receive_queue_name=topic_pair.receive,
src_party_id=src_party_id,
src_role=src_role,
dst_party_id=dst_party_id,
dst_role=dst_role,
extra_args=conf,
)
def _get_consume_message(self, channel_info):
for method, properties, body in channel_info.consume():
LOGGER.debug(
f"[rabbitmq._get_consume_message] method: {method}, properties: {properties}"
)
properties = {
"message_id": properties.message_id,
"correlation_id": properties.correlation_id,
"content_type": properties.content_type,
"headers": json.dumps(properties.headers)
}
yield method.delivery_tag, properties, body
def _consume_ack(self, channel_info, id):
channel_info.ack(delivery_tag=id)
| 9,853 | 37.193798 | 145 | py |
| FATE | FATE-master/python/fate_arch/federation/rabbitmq/_rabbit_manager.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import requests
import time
from fate_arch.common import log
LOGGER = log.getLogger()
C_HTTP_TEMPLATE = "http://{}/api/{}"
C_COMMON_HTTP_HEADER = {'Content-Type': 'application/json'}
"""
The APIs used below refer to https://rawcdn.githack.com/rabbitmq/rabbitmq-management/v3.8.3/priv/www/api/index.html
"""
class RabbitManager:
def __init__(self, user, password, endpoint, runtime_config=None):
self.user = user
self.password = password
self.endpoint = endpoint
        # The runtime_config defines the parameters used to create queues, exchanges, etc.
self.runtime_config = runtime_config if runtime_config is not None else {}
def create_user(self, user, password):
url = C_HTTP_TEMPLATE.format(self.endpoint, "users/" + user)
body = {
"password": password,
"tags": ""
}
result = requests.put(url, headers=C_COMMON_HTTP_HEADER,
json=body, auth=(self.user, self.password))
LOGGER.debug(f"[rabbitmanager.create_user] {result}")
if result.status_code == 201 or result.status_code == 204:
return True
else:
return False
def delete_user(self, user):
url = C_HTTP_TEMPLATE.format(self.endpoint, "users/" + user)
result = requests.delete(url, auth=(self.user, self.password))
LOGGER.debug(f"[rabbitmanager.delete_user] {result}")
return result
def create_vhost(self, vhost):
url = C_HTTP_TEMPLATE.format(self.endpoint, "vhosts/" + vhost)
result = requests.put(
url, headers=C_COMMON_HTTP_HEADER, auth=(self.user, self.password))
LOGGER.debug(f"[rabbitmanager.create_vhost] {result}")
self.add_user_to_vhost(self.user, vhost)
return True
def delete_vhost(self, vhost):
url = C_HTTP_TEMPLATE.format(self.endpoint, "vhosts/" + vhost)
result = requests.delete(url, auth=(self.user, self.password))
LOGGER.debug(f"[rabbitmanager.delete_vhost] {result}")
return result
def delete_vhosts(self):
result = self.get_vhosts()
names = None
try:
if result.status_code == 200:
names = [e["name"] for e in result.json()]
except BaseException:
names = None
LOGGER.debug(f"[rabbitmanager.delete_vhosts] {names}")
if names is not None:
LOGGER.debug("[rabbitmanager.delete_vhosts]start to delete_vhosts")
for name in names:
self.delete_vhost(name)
def get_vhosts(self):
url = C_HTTP_TEMPLATE.format(self.endpoint, "vhosts")
result = requests.get(url, auth=(self.user, self.password))
LOGGER.debug(f"[rabbitmanager.get_vhosts] {result}")
return result
def add_user_to_vhost(self, user, vhost):
url = C_HTTP_TEMPLATE.format(
self.endpoint, "{}/{}/{}".format("permissions", vhost, user))
body = {
"configure": ".*",
"write": ".*",
"read": ".*"
}
result = requests.put(url, headers=C_COMMON_HTTP_HEADER,
json=body, auth=(self.user, self.password))
LOGGER.debug(f"[rabbitmanager.add_user_to_vhost] {result}")
if result.status_code == 201 or result.status_code == 204:
return True
else:
return False
def remove_user_from_vhost(self, user, vhost):
url = C_HTTP_TEMPLATE.format(
self.endpoint, "{}/{}/{}".format("permissions", vhost, user))
result = requests.delete(url, auth=(self.user, self.password))
LOGGER.debug(f"[rabbitmanager.remove_user_from_vhost] {result}")
return result
def get_exchanges(self, vhost):
url = C_HTTP_TEMPLATE.format(self.endpoint, "{}/{}".format("exchanges", vhost))
result = requests.get(url, auth=(self.user, self.password))
LOGGER.debug(f"[rabbitmanager.get_exchanges] {result}")
try:
if result.status_code == 200:
exchange_names = [e["name"] for e in result.json()]
LOGGER.debug(f"[rabbitmanager.get_exchanges] exchange_names={exchange_names}")
return exchange_names
else:
return None
except BaseException:
return None
def create_exchange(self, vhost, exchange_name):
url = C_HTTP_TEMPLATE.format(
self.endpoint, "{}/{}/{}".format("exchanges", vhost, exchange_name))
basic_config = {
"type": "direct",
"auto_delete": False,
"durable": True,
"internal": False,
"arguments": {}
}
exchange_runtime_config = self.runtime_config.get("exchange", {})
basic_config.update(exchange_runtime_config)
result = requests.put(url, headers=C_COMMON_HTTP_HEADER,
json=basic_config, auth=(self.user, self.password))
LOGGER.debug(result)
return result
def delete_exchange(self, vhost, exchange_name):
url = C_HTTP_TEMPLATE.format(
self.endpoint, "{}/{}/{}".format("exchanges", vhost, exchange_name))
result = requests.delete(url, auth=(self.user, self.password))
LOGGER.debug(f"[rabbitmanager.delete_exchange] vhost={vhost}, exchange_name={exchange_name}, {result}")
return result
def get_policies(self, vhost):
url = C_HTTP_TEMPLATE.format(self.endpoint, "{}/{}".format("policies", vhost))
result = requests.get(url, auth=(self.user, self.password))
LOGGER.debug(f"[rabbitmanager.get_policies] {result}")
try:
if result.status_code == 200:
policies_names = [e["name"] for e in result.json()]
LOGGER.debug(f"[rabbitmanager.get_policies] policies_names={policies_names}")
return policies_names
else:
return None
except BaseException:
return None
def delete_policy(self, vhost, policy_name):
url = C_HTTP_TEMPLATE.format(
self.endpoint, "{}/{}/{}".format("policies", vhost, policy_name))
result = requests.delete(url, auth=(self.user, self.password))
LOGGER.debug(f"[rabbitmanager.delete_policy] vhost={vhost}, policy_name={policy_name}, {result}")
return result
def create_queue(self, vhost, queue_name):
url = C_HTTP_TEMPLATE.format(
self.endpoint, "{}/{}/{}".format("queues", vhost, queue_name))
basic_config = {
"auto_delete": False,
"durable": True,
"arguments": {}
}
queue_runtime_config = self.runtime_config.get("queue", {})
basic_config.update(queue_runtime_config)
LOGGER.debug(basic_config)
result = requests.put(url, headers=C_COMMON_HTTP_HEADER,
json=basic_config, auth=(self.user, self.password))
LOGGER.debug(f"[rabbitmanager.create_queue] {result}")
if result.status_code == 201 or result.status_code == 204:
return True
else:
return False
def get_queue(self, vhost, queue_name):
url = C_HTTP_TEMPLATE.format(
self.endpoint, "{}/{}/{}".format("queues", vhost, queue_name))
result = requests.get(url, headers=C_COMMON_HTTP_HEADER, auth=(self.user, self.password))
return result
def get_queues(self, vhost):
url = C_HTTP_TEMPLATE.format(
self.endpoint, "{}/{}".format("queues", vhost))
result = requests.get(url, headers=C_COMMON_HTTP_HEADER, auth=(self.user, self.password))
try:
if result.status_code == 200:
queue_names = [e["name"] for e in result.json()]
LOGGER.debug(f"[rabbitmanager.get_all_queue] queue_names={queue_names}")
return queue_names
else:
return None
except BaseException:
return None
def delete_queue(self, vhost, queue_name):
url = C_HTTP_TEMPLATE.format(
self.endpoint, "{}/{}/{}".format("queues", vhost, queue_name))
result = requests.delete(url, auth=(self.user, self.password))
LOGGER.debug(f"[rabbitmanager.delete_queue] vhost={vhost}, queue_name={queue_name}, {result}")
return result
def get_connections(self):
url = C_HTTP_TEMPLATE.format(
self.endpoint, "connections")
result = requests.get(url, headers=C_COMMON_HTTP_HEADER, auth=(self.user, self.password))
LOGGER.debug(f"[rabbitmanager.get_connections] {result}")
return result
def delete_connections(self, vhost=None):
result = self.get_connections()
names = None
try:
if result.status_code == 200:
if vhost is None:
names = [e["name"] for e in result.json()]
else:
names = [e["name"] for e in result.json() if e["vhost"] == vhost]
except BaseException:
names = None
LOGGER.debug(f"[rabbitmanager.delete_connections] {names}")
if names is not None:
LOGGER.debug("[rabbitmanager.delete_connections] start....")
for name in names:
url = C_HTTP_TEMPLATE.format(
self.endpoint, "{}/{}".format("connections", name))
result = requests.delete(url, auth=(self.user, self.password))
LOGGER.debug(result)
def bind_exchange_to_queue(self, vhost, exchange_name, queue_name):
url = C_HTTP_TEMPLATE.format(self.endpoint, "{}/{}/e/{}/q/{}".format("bindings",
vhost,
exchange_name,
queue_name))
body = {
"routing_key": queue_name,
"arguments": {}
}
result = requests.post(
url, headers=C_COMMON_HTTP_HEADER, json=body, auth=(self.user, self.password))
LOGGER.debug(result)
return result
def unbind_exchange_to_queue(self, vhost, exchange_name, queue_name):
url = C_HTTP_TEMPLATE.format(self.endpoint, "{}/{}/e/{}/q/{}/{}".format("bindings",
vhost,
exchange_name,
queue_name,
queue_name))
result = requests.delete(url, auth=(self.user, self.password))
LOGGER.debug(result)
return result
def _set_federated_upstream(self, upstream_host, vhost, receive_queue_name):
url = C_HTTP_TEMPLATE.format(self.endpoint, "{}/{}/{}/{}".format("parameters",
"federation-upstream",
vhost,
receive_queue_name))
upstream_runtime_config = self.runtime_config.get("upstream", {})
upstream_runtime_config['uri'] = upstream_host
upstream_runtime_config['queue'] = receive_queue_name.replace(
"receive", "send", 1)
body = {
"value": upstream_runtime_config
}
LOGGER.debug(f"[rabbitmanager._set_federated_upstream]set_federated_upstream, url: {url} body: {body}")
result = requests.put(url, headers=C_COMMON_HTTP_HEADER,
json=body, auth=(self.user, self.password))
LOGGER.debug(f"[rabbitmanager._set_federated_upstream] {result}")
if result.status_code != 201 and result.status_code != 204:
LOGGER.debug(f"[rabbitmanager._set_federated_upstream] _set_federated_upstream fail. {result}")
return False
return True
def _unset_federated_upstream(self, upstream_name, vhost):
url = C_HTTP_TEMPLATE.format(self.endpoint, "{}/{}/{}/{}".format("parameters",
"federation-upstream",
vhost,
upstream_name))
result = requests.delete(url, auth=(self.user, self.password))
LOGGER.debug(result)
return result
def _set_federated_queue_policy(self, vhost, receive_queue_name):
url = C_HTTP_TEMPLATE.format(self.endpoint, "{}/{}/{}".format("policies",
vhost,
receive_queue_name))
body = {
"pattern": '^' + receive_queue_name + '$',
"apply-to": "queues",
"definition":
{
"federation-upstream": receive_queue_name
}
}
LOGGER.debug(f"[rabbitmanager._set_federated_queue_policy]set_federated_queue_policy, url: {url} body: {body}")
result = requests.put(url, headers=C_COMMON_HTTP_HEADER,
json=body, auth=(self.user, self.password))
LOGGER.debug(f"[rabbitmanager._set_federated_queue_policy] {result}")
if result.status_code != 201 and result.status_code != 204:
LOGGER.debug(f"[rabbitmanager._set_federated_queue_policy] _set_federated_queue_policy fail. {result}")
return False
return True
def _unset_federated_queue_policy(self, policy_name, vhost):
url = C_HTTP_TEMPLATE.format(self.endpoint, "{}/{}/{}".format("policies",
vhost,
policy_name))
result = requests.delete(url, auth=(self.user, self.password))
LOGGER.debug(result)
return result
# Create federate queue with upstream
def federate_queue(self, upstream_host, vhost, send_queue_name, receive_queue_name):
time.sleep(0.1)
LOGGER.debug(f"[rabbitmanager.federate_queue] create federate_queue {receive_queue_name}")
result = self._set_federated_upstream(
upstream_host, vhost, receive_queue_name)
if result is False:
# should be logged
LOGGER.debug(f"[rabbitmanager.federate_queue] result_set_upstream fail.")
return False
result = self._set_federated_queue_policy(
vhost, receive_queue_name)
if result is False:
LOGGER.debug(f"[rabbitmanager.federate_queue] result_set_policy fail.")
return False
return True
def de_federate_queue(self, vhost, receive_queue_name):
result = self._unset_federated_queue_policy(receive_queue_name, vhost)
LOGGER.debug(
f"delete federate queue policy status code: {result.status_code}")
result = self._unset_federated_upstream(receive_queue_name, vhost)
LOGGER.debug(
f"delete federate queue upstream status code: {result.status_code}")
return True
def clean(self, vhost):
time.sleep(1)
queue_names = self.get_queues(vhost)
if queue_names is not None:
for name in queue_names:
self.delete_queue(vhost, name)
exchange_names = self.get_exchanges(vhost)
if exchange_names is not None:
for name in exchange_names:
self.delete_exchange(vhost, name)
policy_names = self.get_policies(vhost)
if policy_names is not None:
for name in policy_names:
self.delete_policy(vhost, name)
self.delete_vhost(vhost=vhost)
time.sleep(1)
self.delete_connections(vhost=vhost)
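# --- illustrative sketch (added; not part of the original module) ---
# Minimal walk-through of the management calls above, assuming a local RabbitMQ
# management API at 127.0.0.1:15672 reachable with guest/guest credentials; every
# name below is a placeholder.
if __name__ == "__main__":
    manager = RabbitManager("guest", "guest", "127.0.0.1:15672")
    manager.create_user("fed_user", "fed_pass")
    manager.create_vhost("demo-vhost")
    manager.create_queue("demo-vhost", "send-demo")
    manager.create_queue("demo-vhost", "receive-demo")
    # federate the local receive queue against a hypothetical remote upstream
    manager.federate_queue(
        upstream_host="amqp://fed_user:fed_pass@remote-host:5672",
        vhost="demo-vhost",
        send_queue_name="send-demo",
        receive_queue_name="receive-demo",
    )
    manager.clean("demo-vhost")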
| 16,795 | 40.369458 | 119 | py |
| FATE | FATE-master/python/fate_arch/federation/rabbitmq/__init__.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.federation.rabbitmq._federation import Federation, MQ, RabbitManager
__all__ = ['Federation', 'MQ', 'RabbitManager']
| 750 | 36.55 | 83 | py |
| FATE | FATE-master/python/fate_arch/federation/rabbitmq/_mq_channel.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import pika
from fate_arch.common import log
from fate_arch.federation._nretry import nretry
LOGGER = log.getLogger()
class MQChannel(object):
def __init__(self,
host,
port,
user,
password,
namespace,
vhost,
send_queue_name,
receive_queue_name,
src_party_id,
src_role,
dst_party_id,
dst_role,
extra_args: dict):
self._host = host
self._port = port
self._credentials = pika.PlainCredentials(user, password)
self._namespace = namespace
self._vhost = vhost
self._send_queue_name = send_queue_name
self._receive_queue_name = receive_queue_name
self._src_party_id = src_party_id
self._src_role = src_role
self._dst_party_id = dst_party_id
self._dst_role = dst_role
self._conn = None
self._channel = None
self._extra_args = extra_args
if "heartbeat" not in self._extra_args:
self._extra_args["heartbeat"] = 3600
def __str__(self):
return (
f"MQChannel(host={self._host}, port={self._port}, namespace={self._namespace}, "
f"src_party_id={self._src_party_id}, src_role={self._src_role},"
f"dst_party_id={self._dst_party_id}, dst_role={self._dst_role},"
f"send_queue_name={self._send_queue_name}, receive_queue_name={self._receive_queue_name}),"
)
def __repr__(self):
return self.__str__()
@nretry
def produce(self, body, properties: dict):
self._get_channel()
LOGGER.debug(f"send queue: {self._send_queue_name}")
if "headers" in properties:
headers = json.loads(properties["headers"])
else:
headers = {}
properties = pika.BasicProperties(
content_type=properties["content_type"],
app_id=properties["app_id"],
message_id=properties["message_id"],
correlation_id=properties["correlation_id"],
headers=headers,
delivery_mode=1,
)
return self._channel.basic_publish(exchange='', routing_key=self._send_queue_name, body=body,
properties=properties)
@nretry
def consume(self):
self._get_channel()
LOGGER.debug(f"receive queue: {self._receive_queue_name}")
return self._channel.consume(queue=self._receive_queue_name)
@nretry
def ack(self, delivery_tag):
self._get_channel()
return self._channel.basic_ack(delivery_tag=delivery_tag)
@nretry
def cancel(self):
self._get_channel()
return self._channel.cancel()
def _get_channel(self):
if self._check_alive():
return
else:
self._clear()
if not self._conn:
self._conn = pika.BlockingConnection(pika.ConnectionParameters(host=self._host, port=self._port,
virtual_host=self._vhost,
credentials=self._credentials,
**self._extra_args))
if not self._channel:
self._channel = self._conn.channel()
self._channel.confirm_delivery()
def _clear(self):
try:
if self._conn and self._conn.is_open:
self._conn.close()
self._conn = None
if self._channel and self._channel.is_open:
self._channel.close()
self._channel = None
except Exception as e:
LOGGER.exception(e)
self._conn = None
self._channel = None
def _check_alive(self):
return self._channel and self._channel.is_open and self._conn and self._conn.is_open
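# --- illustrative sketch (added; not part of the original module) ---
# Hypothetical construction and a single publish, assuming the vhost and queues
# were created beforehand (e.g. via RabbitManager); all values are placeholders.
if __name__ == "__main__":
    channel = MQChannel(
        host="127.0.0.1", port=5672, user="fed_user", password="fed_pass",
        namespace="demo", vhost="demo-vhost",
        send_queue_name="send-demo", receive_queue_name="receive-demo",
        src_party_id="9999", src_role="guest",
        dst_party_id="10000", dst_role="host",
        extra_args={},
    )
    channel.produce(
        body=b"hello",
        properties={"content_type": "text/plain", "app_id": "demo",
                    "message_id": "1", "correlation_id": "1"},
    )
    channel.cancel()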
| 4,670 | 32.12766 | 108 | py |
| FATE | FATE-master/python/fate_test/setup.py |
# -*- coding: utf-8 -*-
from setuptools import setup
packages = ["fate_test", "fate_test.scripts", "fate_test.scripts.op_test", "fate_test.flow_test"]
package_data = {"": ["*"]}
install_requires = [
"click>=7.1.2,<8.0.0",
"fate_client>=1.11,<2.0",
"loguru>=0.6.0",
"pandas>=1.1.5",
"poetry>=0.12",
"prettytable>=1.0.0,<2.0.0",
"requests>=2.24.0,<3.0.0",
"requests_toolbelt>=0.9.1,<0.10.0",
"ruamel.yaml>=0.16.10,<0.17.0",
"sshtunnel>=0.1.5,<0.2.0",
'colorama>=0.4.4'
]
entry_points = {"console_scripts": ["fate_test = fate_test.scripts.cli:cli"]}
setup_kwargs = {
"name": "fate-test",
"version": "1.11.2",
"description": "test tools for FATE",
"long_description": 'FATE Test\n=========\n\nA collection of useful tools to running FATE\'s test.\n\n.. image:: images/tutorial.gif\n :align: center\n :alt: tutorial\n\nquick start\n-----------\n\n1. (optional) create virtual env\n\n .. code-block:: bash\n\n python -m venv venv\n source venv/bin/activate\n pip install -U pip\n\n\n2. install fate_test\n\n .. code-block:: bash\n\n pip install fate_test\n fate_test --help\n\n\n3. edit default fate_test_config.yaml\n\n .. code-block:: bash\n\n # edit priority config file with system default editor\n # filling some field according to comments\n fate_test config edit\n\n4. configure FATE-Pipeline and FATE-Flow Commandline server setting\n\n.. code-block:: bash\n\n # configure FATE-Pipeline server setting\n pipeline init --port 9380 --ip 127.0.0.1\n # configure FATE-Flow Commandline server setting\n flow init --port 9380 --ip 127.0.0.1\n\n5. run some fate_test suite\n\n .. code-block:: bash\n\n fate_test suite -i <path contains *testsuite.json>\n\n\n6. run some fate_test benchmark\n\n .. code-block:: bash\n\n fate_test benchmark-quality -i <path contains *benchmark.json>\n\n7. useful logs or exception will be saved to logs dir with namespace shown in last step\n\ndevelop install\n---------------\nIt is more convenient to use the editable mode during development: replace step 2 with flowing steps\n\n.. code-block:: bash\n\n pip install -e ${FATE}/python/fate_client && pip install -e ${FATE}/python/fate_test\n\n\n\ncommand types\n-------------\n\n- suite: used for running testsuites, collection of FATE jobs\n\n .. code-block:: bash\n\n fate_test suite -i <path contains *testsuite.json>\n\n\n- benchmark-quality used for comparing modeling quality between FATE and other machine learning systems\n\n .. code-block:: bash\n\n fate_test benchmark-quality -i <path contains *benchmark.json>\n\n\n\nconfiguration by examples\n--------------------------\n\n1. no need ssh tunnel:\n\n - 9999, service: service_a\n - 10000, service: service_b\n\n and both service_a, service_b can be requested directly:\n\n .. code-block:: yaml\n\n work_mode: 1 # 0 for standalone, 1 for cluster\n data_base_dir: <path_to_data>\n parties:\n guest: [10000]\n host: [9999, 10000]\n arbiter: [9999]\n services:\n - flow_services:\n - {address: service_a, parties: [9999]}\n - {address: service_b, parties: [10000]}\n\n2. need ssh tunnel:\n\n - 9999, service: service_a\n - 10000, service: service_b\n\n service_a, can be requested directly while service_b don\'t,\n but you can request service_b in other node, say B:\n\n .. code-block:: yaml\n\n work_mode: 0 # 0 for standalone, 1 for cluster\n data_base_dir: <path_to_data>\n parties:\n guest: [10000]\n host: [9999, 10000]\n arbiter: [9999]\n services:\n - flow_services:\n - {address: service_a, parties: [9999]}\n - flow_services:\n - {address: service_b, parties: [10000]}\n ssh_tunnel: # optional\n enable: true\n ssh_address: <ssh_ip_to_B>:<ssh_port_to_B>\n ssh_username: <ssh_username_to B>\n ssh_password: # optional\n ssh_priv_key: "~/.ssh/id_rsa"\n\n\nTestsuite\n---------\n\nTestsuite is used for running a collection of jobs in sequence. Data used for jobs could be uploaded before jobs are\nsubmitted, and are cleaned when jobs finished. This tool is useful for FATE\'s release test.\n\ncommand options\n~~~~~~~~~~~~~~~\n\n.. code-block:: bash\n\n fate_test suite --help\n\n1. include:\n\n .. code-block:: bash\n\n fate_test suite -i <path1 contains *testsuite.json>\n\n will run testsuites in *path1*\n\n2. exclude:\n\n .. 
code-block:: bash\n\n fate_test suite -i <path1 contains *testsuite.json> -e <path2 to exclude> -e <path3 to exclude> ...\n\n will run testsuites in *path1* but not in *path2* and *path3*\n\n3. glob:\n\n .. code-block:: bash\n\n fate_test suite -i <path1 contains *testsuite.json> -g "hetero*"\n\n will run testsuites in sub directory start with *hetero* of *path1*\n\n4. replace:\n\n .. code-block:: bash\n\n fate_test suite -i <path1 contains *testsuite.json> -r \'{"maxIter": 5}\'\n\n will find all key-value pair with key "maxIter" in `data conf` or `conf` or `dsl` and replace the value with 5\n\n\n5. skip-data:\n\n .. code-block:: bash\n\n fate_test suite -i <path1 contains *testsuite.json> --skip-data\n\n will run testsuites in *path1* without uploading data specified in *benchmark.json*.\n\n\n6. yes:\n\n .. code-block:: bash\n\n fate_test suite -i <path1 contains *testsuite.json> --yes\n\n will run testsuites in *path1* directly, skipping double check\n\n7. skip-dsl-jobs:\n\n .. code-block:: bash\n\n fate_test suite -i <path1 contains *testsuite.json> --skip-dsl-jobs\n\n will run testsuites in *path1* but skip all *tasks* in testsuites. It\'s would be useful when only pipeline tasks needed.\n\n8. skip-pipeline-jobs:\n\n .. code-block:: bash\n\n fate_test suite -i <path1 contains *testsuite.json> --skip-pipeline-jobs\n\n will run testsuites in *path1* but skip all *pipeline tasks* in testsuites. It\'s would be useful when only dsl tasks needed.\n\n\nBenchmark Quality\n------------------\n\nBenchmark-quality is used for comparing modeling quality between FATE\nand other machine learning systems. Benchmark produces a metrics comparison\nsummary for each benchmark job group.\n\n.. code-block:: bash\n\n fate_test benchmark-quality -i examples/benchmark_quality/hetero_linear_regression\n\n.. code-block:: bash\n\n +-------+--------------------------------------------------------------+\n | Data | Name |\n +-------+--------------------------------------------------------------+\n | train | {\'guest\': \'motor_hetero_guest\', \'host\': \'motor_hetero_host\'} |\n | test | {\'guest\': \'motor_hetero_guest\', \'host\': \'motor_hetero_host\'} |\n +-------+--------------------------------------------------------------+\n +------------------------------------+--------------------+--------------------+-------------------------+---------------------+\n | Model Name | explained_variance | r2_score | root_mean_squared_error | mean_squared_error |\n +------------------------------------+--------------------+--------------------+-------------------------+---------------------+\n | local-linear_regression-regression | 0.9035168452250094 | 0.9035070863155368 | 0.31340413289880553 | 0.09822215051805216 |\n | FATE-linear_regression-regression | 0.903146386539082 | 0.9031411831961411 | 0.3139977881119483 | 0.09859461093919596 |\n +------------------------------------+--------------------+--------------------+-------------------------+---------------------+\n +-------------------------+-----------+\n | Metric | All Match |\n +-------------------------+-----------+\n | explained_variance | True |\n | r2_score | True |\n | root_mean_squared_error | True |\n | mean_squared_error | True |\n +-------------------------+-----------+\n\ncommand options\n~~~~~~~~~~~~~~~\n\nuse the following command to show help message\n\n.. code-block:: bash\n\n fate_test benchmark-quality --help\n\n1. include:\n\n .. 
code-block:: bash\n\n fate_test benchmark-quality -i <path1 contains *benchmark.json>\n\n will run benchmark testsuites in *path1*\n\n2. exclude:\n\n .. code-block:: bash\n\n fate_test benchmark-quality -i <path1 contains *benchmark.json> -e <path2 to exclude> -e <path3 to exclude> ...\n\n will run benchmark testsuites in *path1* but not in *path2* and *path3*\n\n3. glob:\n\n .. code-block:: bash\n\n fate_test benchmark-quality -i <path1 contains *benchmark.json> -g "hetero*"\n\n will run benchmark testsuites in sub directory start with *hetero* of *path1*\n\n4. tol:\n\n .. code-block:: bash\n\n fate_test benchmark-quality -i <path1 contains *benchmark.json> -t 1e-3\n\n will run benchmark testsuites in *path1* with absolute tolerance of difference between metrics set to 0.001.\n If absolute difference between metrics is smaller than *tol*, then metrics are considered\n almost equal. Check benchmark testsuite `writing guide <#benchmark-testsuite>`_ on setting alternative tolerance.\n\n5. skip-data:\n\n .. code-block:: bash\n\n fate_test benchmark-quality -i <path1 contains *benchmark.json> --skip-data\n\n will run benchmark testsuites in *path1* without uploading data specified in *benchmark.json*.\n\n\n6. yes:\n\n .. code-block:: bash\n\n fate_test benchmark-quality -i <path1 contains *benchmark.json> --yes\n\n will run benchmark testsuites in *path1* directly, skipping double check\n\n\nbenchmark testsuite\n~~~~~~~~~~~~~~~~~~~\n\nConfiguration of jobs should be specified in a benchmark testsuite whose file name ends\nwith "\\*benchmark.json". For benchmark testsuite example,\nplease refer `here <../../examples/benchmark_quality>`_.\n\nA benchmark testsuite includes the following elements:\n\n- data: list of local data to be uploaded before running FATE jobs\n\n - file: path to original data file to be uploaded, should be relative to testsuite or FATE installation path\n - head: whether file includes header\n - partition: number of partition for data storage\n - table_name: table name in storage\n - namespace: table namespace in storage\n - role: which role to upload the data, as specified in fate_test.config;\n naming format is: "{role_type}_{role_index}", index starts at 0\n\n .. code-block:: json\n\n "data": [\n {\n "file": "examples/data/motor_hetero_host.csv",\n "head": 1,\n "partition": 8,\n "table_name": "motor_hetero_host",\n "namespace": "experiment",\n "role": "host_0"\n }\n ]\n\n- job group: each group includes arbitrary number of jobs with paths to corresponding script and configuration\n\n - job: name of job to be run, must be unique within each group list\n\n - script: path to `testing script <#testing-script>`_, should be relative to testsuite\n - conf: path to job configuration file for script, should be relative to testsuite\n\n .. code-block:: json\n\n "local": {\n "script": "./local-linr.py",\n "conf": "./linr_config.yaml"\n }\n\n - compare_setting: additional setting for quality metrics comparison, currently only takes ``relative_tol``\n\n If metrics *a* and *b* satisfy *abs(a-b) <= max(relative_tol \\* max(abs(a), abs(b)), absolute_tol)*\n (from `math module <https://docs.python.org/3/library/math.html#math.isclose>`_),\n they are considered almost equal. In the below example, metrics from "local" and "FATE" jobs are\n considered almost equal if their relative difference is smaller than\n *0.05 \\* max(abs(local_metric), abs(pipeline_metric)*.\n\n .. 
code-block:: json\n\n "linear_regression-regression": {\n "local": {\n "script": "./local-linr.py",\n "conf": "./linr_config.yaml"\n },\n "FATE": {\n "script": "./fate-linr.py",\n "conf": "./linr_config.yaml"\n },\n "compare_setting": {\n "relative_tol": 0.01\n }\n }\n\n\ntesting script\n~~~~~~~~~~~~~~\n\nAll job scripts need to have ``Main`` function as an entry point for executing jobs; scripts should\nreturn two dictionaries: first with data information key-value pairs: {data_type}: {data_name_dictionary};\nthe second contains {metric_name}: {metric_value} key-value pairs for metric comparison.\n\nBy default, the final data summary shows the output from the job named "FATE"; if no such job exists,\ndata information returned by the first job is shown. For clear presentation, we suggest that user follow\nthis general `guideline <../../examples/data/README.md#data-set-naming-rule>`_ for data set naming. In the case of multi-host\ntask, consider numbering host as such:\n\n::\n\n {\'guest\': \'default_credit_homo_guest\',\n \'host_1\': \'default_credit_homo_host_1\',\n \'host_2\': \'default_credit_homo_host_2\'}\n\nReturned quality metrics of the same key are to be compared.\nNote that only **real-value** metrics can be compared.\n\n- FATE script: ``Main`` always has three inputs:\n\n - config: job configuration, `JobConfig <../fate_client/pipeline/utils/tools.py#L64>`_ object loaded from "fate_test_config.yaml"\n - param: job parameter setting, dictionary loaded from "conf" file specified in benchmark testsuite\n - namespace: namespace suffix, user-given *namespace* or generated timestamp string when using *namespace-mangling*\n\n- non-FATE script: ``Main`` always has one input:\n\n - param: job parameter setting, dictionary loaded from "conf" file specified in benchmark testsuite\n\n\ndata\n----\n\n`Data` sub-command is used for upload or delete dataset in suite\'s.\n\ncommand options\n~~~~~~~~~~~~~~~\n\n.. code-block:: bash\n\n fate_test data --help\n\n1. include:\n\n .. code-block:: bash\n\n fate_test data [upload|delete] -i <path1 contains *testsuite.json>\n\n will upload/delete dataset in testsuites in *path1*\n\n2. exclude:\n\n .. code-block:: bash\n\n fate_test data [upload|delete] -i <path1 contains *testsuite.json> -e <path2 to exclude> -e <path3 to exclude> ...\n\n will upload/delete dataset in testsuites in *path1* but not in *path2* and *path3*\n\n3. glob:\n\n .. code-block:: bash\n\n fate_test data [upload|delete] -i <path1 contains *testsuite.json> -g "hetero*"\n\n will upload/delete dataset in testsuites in sub directory start with *hetero* of *path1*\n\n\nfull command options\n---------------------\n\n.. click:: fate_test.scripts.cli:cli\n :prog: fate_test\n :show-nested:\n',
"author": "FederatedAI",
"author_email": "[email protected]",
"maintainer": None,
"maintainer_email": None,
"url": "https://fate.fedai.org/",
"packages": packages,
"package_data": package_data,
"install_requires": install_requires,
"entry_points": entry_points,
"python_requires": ">=3.6,<4.0",
}
setup(**setup_kwargs)
| 15,247 | 353.604651 | 14,180 | py |
| FATE | FATE-master/python/fate_test/fate_test/_parser.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import typing
from collections import deque
from pathlib import Path
import click
import prettytable
from fate_test import _config
from fate_test._io import echo
from fate_test._config import Parties, Config
from fate_test.utils import TxtStyle
# noinspection PyPep8Naming
class chain_hook(object):
def __init__(self):
self._hooks = []
def add_hook(self, hook):
self._hooks.append(hook)
return self
def add_extend_namespace_hook(self, namespace):
self.add_hook(_namespace_hook(namespace))
return self
def add_replace_hook(self, mapping):
self.add_hook(_replace_hook(mapping))
def hook(self, d):
return self._chain_hooks(self._hooks, d)
@staticmethod
def _chain_hooks(hook_funcs, d):
for hook_func in hook_funcs:
if d is None:
return
d = hook_func(d)
return d
DATA_JSON_HOOK = chain_hook()
CONF_JSON_HOOK = chain_hook()
DSL_JSON_HOOK = chain_hook()
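# --- illustrative sketch (added; not part of the original module) ---
# These hooks rewrite JSON objects as they are loaded; with hypothetical values,
# after registering a namespace suffix and a replacement mapping:
#   DATA_JSON_HOOK.add_extend_namespace_hook("demo")
#   DATA_JSON_HOOK.add_replace_hook({"partition": 4})
#   DATA_JSON_HOOK.hook({"namespace": "experiment", "partition": 8})
#   -> {"namespace": "experiment_demo", "partition": 4}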
class Data(object):
def __init__(self, config: dict, role_str: str):
self.config = config
self.role_str = role_str
@staticmethod
def load(config, path: Path):
kwargs = {}
for field_name in config.keys():
if field_name not in ["file", "role"]:
kwargs[field_name] = config[field_name]
if config.get("engine", {}) != "PATH":
file_path = path.parent.joinpath(config["file"]).resolve()
if not file_path.exists():
kwargs["file"] = config["file"]
else:
kwargs["file"] = file_path
role_str = config.get("role") if config.get("role") != "guest" else "guest_0"
return Data(config=kwargs, role_str=role_str)
def update(self, config: Config):
update_dict = {}
if config.extend_sid is not None:
update_dict["extend_sid"] = config.extend_sid
if config.auto_increasing_sid is not None:
update_dict["auto_increasing_sid"] = config.auto_increasing_sid
self.config.update(update_dict)
class JobConf(object):
def __init__(self, initiator: dict, role: dict, job_parameters=None, **kwargs):
self.initiator = initiator
self.role = role
self.job_parameters = job_parameters if job_parameters else {}
self.others_kwargs = kwargs
def as_dict(self):
return dict(
initiator=self.initiator,
role=self.role,
job_parameters=self.job_parameters,
**self.others_kwargs,
)
@staticmethod
def load(path: Path):
with path.open("r") as f:
kwargs = json.load(f, object_hook=CONF_JSON_HOOK.hook)
return JobConf(**kwargs)
@property
def dsl_version(self):
return self.others_kwargs.get("dsl_version", 1)
def update(
self,
parties: Parties,
timeout,
job_parameters,
component_parameters,
):
self.initiator = parties.extract_initiator_role(self.initiator["role"])
self.role = parties.extract_role(
{role: len(parties) for role, parties in self.role.items()}
)
        if timeout > 0:
            self.update_job_common_parameters(timeout=timeout)
for key, value in job_parameters.items():
self.update_parameters(parameters=self.job_parameters, key=key, value=value)
for key, value in component_parameters.items():
if self.dsl_version == 1:
self.update_parameters(
parameters=self.others_kwargs.get("algorithm_parameters"),
key=key,
value=value,
)
else:
self.update_parameters(
parameters=self.others_kwargs.get("component_parameters"),
key=key,
value=value,
)
def update_parameters(self, parameters, key, value):
if isinstance(parameters, dict):
for keys in parameters:
if keys == key:
                    parameters.get(key).update(value)
elif isinstance(parameters[keys], dict):
self.update_parameters(parameters[keys], key, value)
def update_job_common_parameters(self, **kwargs):
if self.dsl_version == 1:
self.job_parameters.update(**kwargs)
else:
self.job_parameters.setdefault("common", {}).update(**kwargs)
def update_job_type(self, job_type="predict"):
if self.dsl_version == 1:
if self.job_parameters.get("job_type", None) is None:
self.job_parameters.update({"job_type": job_type})
else:
if self.job_parameters.setdefault("common", {}).get("job_type", None) is None:
self.job_parameters.setdefault("common", {}).update({"job_type": job_type})
def update_component_parameters(self, key, value, parameters=None):
if parameters is None:
if self.dsl_version == 1:
parameters = self.others_kwargs.get("algorithm_parameters")
else:
parameters = self.others_kwargs.get("component_parameters")
if isinstance(parameters, dict):
for keys in parameters:
if keys == key:
if isinstance(value, dict):
parameters[keys].update(value)
else:
parameters.update({key: value})
elif (
isinstance(parameters[keys], dict) and parameters[keys] is not None
):
self.update_component_parameters(key, value, parameters[keys])
def get_component_parameters(self, keys):
if len(keys) == 0:
return self.others_kwargs.get("component_parameters") if self.dsl_version == 2 else self.others_kwargs.get(
"role_parameters")
if self.dsl_version == 1:
parameters = self.others_kwargs.get("role_parameters")
else:
parameters = self.others_kwargs.get("component_parameters").get("role")
for key in keys:
parameters = parameters[key]
return parameters
class JobDSL(object):
def __init__(self, components: dict, provider=None):
self.components = components
self.provider = provider
@staticmethod
def load(path: Path, provider):
with path.open("r") as f:
kwargs = json.load(f, object_hook=DSL_JSON_HOOK.hook)
if provider is not None:
kwargs["provider"] = provider
return JobDSL(**kwargs)
def as_dict(self):
if self.provider is None:
return dict(components=self.components)
else:
return dict(components=self.components, provider=self.provider)
class Job(object):
def __init__(
self,
job_name: str,
job_conf: JobConf,
job_dsl: typing.Optional[JobDSL],
pre_works: list,
):
self.job_name = job_name
self.job_conf = job_conf
self.job_dsl = job_dsl
self.pre_works = pre_works
@classmethod
def load(cls, job_name, job_configs, base: Path, provider):
job_conf = JobConf.load(base.joinpath(job_configs.get("conf")).resolve())
job_dsl = job_configs.get("dsl", None)
if job_dsl is not None:
job_dsl = JobDSL.load(base.joinpath(job_dsl).resolve(), provider)
pre_works = []
pre_works_value = {}
deps_dict = {}
if job_configs.get("model_deps", None):
pre_works.append(job_configs["model_deps"])
deps_dict["model_deps"] = {'name': job_configs["model_deps"]}
elif job_configs.get("deps", None):
pre_works.append(job_configs["deps"])
deps_dict["model_deps"] = {'name': job_configs["deps"]}
if job_configs.get("data_deps", None):
deps_dict["data_deps"] = {'data': job_configs["data_deps"]}
pre_works.append(list(job_configs["data_deps"].keys())[0])
deps_dict["data_deps"].update({'name': list(job_configs["data_deps"].keys())})
if job_configs.get("cache_deps", None):
pre_works.append(job_configs["cache_deps"])
deps_dict["cache_deps"] = {'name': job_configs["cache_deps"]}
if job_configs.get("model_loader_deps", None):
pre_works.append(job_configs["model_loader_deps"])
deps_dict["model_loader_deps"] = {'name': job_configs["model_loader_deps"]}
pre_works_value.update(deps_dict)
_config.deps_alter[job_name] = pre_works_value
return Job(
job_name=job_name, job_conf=job_conf, job_dsl=job_dsl, pre_works=pre_works
)
@property
def submit_params(self):
return dict(
conf=self.job_conf.as_dict(),
dsl=self.job_dsl.as_dict() if self.job_dsl else None,
)
def set_pre_work(self, name, **kwargs):
self.job_conf.update_job_common_parameters(**kwargs)
self.job_conf.update_job_type("predict")
def set_input_data(self, hierarchys, table_info):
for table_name, hierarchy in zip(table_info, hierarchys):
key = list(table_name.keys())[0]
value = table_name[key]
self.job_conf.update_component_parameters(
key=key,
value=value,
parameters=self.job_conf.get_component_parameters(hierarchy),
)
def is_submit_ready(self):
return len(self.pre_works) == 0
class PipelineJob(object):
def __init__(self, job_name: str, script_path: Path):
self.job_name = job_name
self.script_path = script_path
class Testsuite(object):
def __init__(
self,
dataset: typing.List[Data],
jobs: typing.List[Job],
pipeline_jobs: typing.List[PipelineJob],
path: Path,
):
self.dataset = dataset
self.jobs = jobs
self.pipeline_jobs = pipeline_jobs
self.path = path
self.suite_name = Path(self.path).stem
self._dependency: typing.MutableMapping[str, typing.List[Job]] = {}
self._final_status: typing.MutableMapping[str, FinalStatus] = {}
self._ready_jobs = deque()
for job in self.jobs:
for name in job.pre_works:
self._dependency.setdefault(name, []).append(job)
self._final_status[job.job_name] = FinalStatus(job.job_name)
if job.is_submit_ready():
self._ready_jobs.appendleft(job)
for job in self.pipeline_jobs:
self._final_status[job.job_name] = FinalStatus(job.job_name)
@staticmethod
def load(path: Path, provider):
with path.open("r") as f:
testsuite_config = json.load(f, object_hook=DATA_JSON_HOOK.hook)
dataset = []
for d in testsuite_config.get("data"):
if "use_local_data" not in d:
d.update({"use_local_data": _config.use_local_data})
dataset.append(Data.load(d, path))
jobs = []
for job_name, job_configs in testsuite_config.get("tasks", {}).items():
jobs.append(
Job.load(job_name=job_name, job_configs=job_configs, base=path.parent, provider=provider)
)
pipeline_jobs = []
if testsuite_config.get("pipeline_tasks", None) is not None and provider is not None:
echo.echo('[Warning] Pipeline does not support parameter: provider-> {}'.format(provider))
for job_name, job_configs in testsuite_config.get("pipeline_tasks", {}).items():
script_path = path.parent.joinpath(job_configs["script"]).resolve()
pipeline_jobs.append(PipelineJob(job_name, script_path))
testsuite = Testsuite(dataset, jobs, pipeline_jobs, path)
return testsuite
def jobs_iter(self) -> typing.Generator[Job, None, None]:
while self._ready_jobs:
yield self._ready_jobs.pop()
@staticmethod
def style_table(txt):
colored_txt = txt.replace("success", f"{TxtStyle.TRUE_VAL}success{TxtStyle.END}")
colored_txt = colored_txt.replace("failed", f"{TxtStyle.FALSE_VAL}failed{TxtStyle.END}")
colored_txt = colored_txt.replace("not submitted", f"{TxtStyle.FALSE_VAL}not submitted{TxtStyle.END}")
return colored_txt
def pretty_final_summary(self, time_consuming, suite_file=None):
"""table = prettytable.PrettyTable(
["job_name", "job_id", "status", "time_consuming", "exception_id", "rest_dependency"]
)"""
table = prettytable.PrettyTable()
table.set_style(prettytable.ORGMODE)
field_names = ["job_name", "job_id", "status", "time_consuming", "exception_id", "rest_dependency"]
table.field_names = field_names
for status in self.get_final_status().values():
if status.status != "success":
status.suite_file = suite_file
_config.non_success_jobs.append(status)
if status.exception_id != "-":
exception_id_txt = f"{TxtStyle.FALSE_VAL}{status.exception_id}{TxtStyle.END}"
else:
exception_id_txt = f"{TxtStyle.FIELD_VAL}{status.exception_id}{TxtStyle.END}"
table.add_row(
[
f"{TxtStyle.FIELD_VAL}{status.name}{TxtStyle.END}",
f"{TxtStyle.FIELD_VAL}{status.job_id}{TxtStyle.END}",
self.style_table(status.status),
f"{TxtStyle.FIELD_VAL}{time_consuming.pop(0) if status.job_id != '-' else '-'}{TxtStyle.END}",
f"{exception_id_txt}",
f"{TxtStyle.FIELD_VAL}{','.join(status.rest_dependency)}{TxtStyle.END}",
]
)
return table.get_string(title=f"{TxtStyle.TITLE}Testsuite Summary: {self.suite_name}{TxtStyle.END}")
def model_in_dep(self, name):
return name in self._dependency
def get_dependent_jobs(self, name):
return self._dependency[name]
def remove_dependency(self, name):
del self._dependency[name]
def feed_dep_info(self, job, name, model_info=None, table_info=None, cache_info=None, model_loader_info=None):
if model_info is not None:
job.set_pre_work(name, **model_info)
if table_info is not None:
job.set_input_data(table_info["hierarchy"], table_info["table_info"])
if cache_info is not None:
job.set_input_data(cache_info["hierarchy"], cache_info["cache_info"])
if model_loader_info is not None:
job.set_input_data(model_loader_info["hierarchy"], model_loader_info["model_loader_info"])
if name in job.pre_works:
job.pre_works.remove(name)
if job.is_submit_ready():
self._ready_jobs.appendleft(job)
def reflash_configs(self, config: Config):
failed = []
for job in self.jobs:
try:
job.job_conf.update(
config.parties, None, {}, {}
)
except ValueError as e:
failed.append((job, e))
return failed
def update_status(
self, job_name, job_id: str = None, status: str = None, exception_id: str = None
):
for k, v in locals().items():
if k != "job_name" and v is not None:
setattr(self._final_status[job_name], k, v)
def get_final_status(self):
for name, jobs in self._dependency.items():
for job in jobs:
self._final_status[job.job_name].rest_dependency.append(name)
return self._final_status
class FinalStatus(object):
def __init__(
self,
name: str,
job_id: str = "-",
status: str = "not submitted",
exception_id: str = "-",
rest_dependency: typing.List[str] = None,
):
self.name = name
self.job_id = job_id
self.status = status
self.exception_id = exception_id
self.rest_dependency = rest_dependency or []
self.suite_file = None
class BenchmarkJob(object):
def __init__(self, job_name: str, script_path: Path, conf_path: Path):
self.job_name = job_name
self.script_path = script_path
self.conf_path = conf_path
class BenchmarkPair(object):
def __init__(
self, pair_name: str, jobs: typing.List[BenchmarkJob], compare_setting: dict
):
self.pair_name = pair_name
self.jobs = jobs
self.compare_setting = compare_setting
class BenchmarkSuite(object):
def __init__(
self, dataset: typing.List[Data], pairs: typing.List[BenchmarkPair], path: Path
):
self.dataset = dataset
self.pairs = pairs
self.path = path
@staticmethod
def load(path: Path):
with path.open("r") as f:
testsuite_config = json.load(f, object_hook=DATA_JSON_HOOK.hook)
dataset = []
for d in testsuite_config.get("data"):
dataset.append(Data.load(d, path))
pairs = []
for pair_name, pair_configs in testsuite_config.items():
if pair_name == "data":
continue
jobs = []
for job_name, job_configs in pair_configs.items():
if job_name == "compare_setting":
continue
script_path = path.parent.joinpath(job_configs["script"]).resolve()
if job_configs.get("conf"):
conf_path = path.parent.joinpath(job_configs["conf"]).resolve()
else:
conf_path = ""
jobs.append(
BenchmarkJob(
job_name=job_name, script_path=script_path, conf_path=conf_path
)
)
compare_setting = pair_configs.get("compare_setting")
if compare_setting and not isinstance(compare_setting, dict):
raise ValueError(
f"expected 'compare_setting' type is dict, received {type(compare_setting)} instead."
)
pairs.append(
BenchmarkPair(
pair_name=pair_name, jobs=jobs, compare_setting=compare_setting
)
)
suite = BenchmarkSuite(dataset=dataset, pairs=pairs, path=path)
return suite
def non_success_summary():
status = {}
for job in _config.non_success_jobs:
if job.status not in status.keys():
status[job.status] = prettytable.PrettyTable(
["testsuite_name", "job_name", "job_id", "status", "exception_id", "rest_dependency"]
)
status[job.status].add_row(
[
job.suite_file,
job.name,
job.job_id,
job.status,
job.exception_id,
",".join(job.rest_dependency),
]
)
for k, v in status.items():
echo.echo("\n" + "#" * 60)
echo.echo(v.get_string(title=f"{k} job record"), fg='red')
def _namespace_hook(namespace):
def _hook(d):
if d is None:
return d
if "namespace" in d and namespace:
d["namespace"] = f"{d['namespace']}_{namespace}"
return d
return _hook
def _replace_hook(mapping: dict):
def _hook(d):
for k, v in mapping.items():
if k in d:
d[k] = v
return d
return _hook
class JsonParamType(click.ParamType):
name = "json_string"
def convert(self, value, param, ctx):
try:
return json.loads(value)
except ValueError:
self.fail(f"{value} is not a valid json string", param, ctx)
JSON_STRING = JsonParamType()
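# --- illustrative sketch (added; not part of the original module) ---
# JSON_STRING can serve as a click option type so the option value arrives as a
# parsed object, e.g. (hypothetical command):
#   @click.command()
#   @click.option("-r", "--replace", type=JSON_STRING, default="{}")
#   def run(replace): ...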
| 20,726 | 34.613402 | 119 | py |
| FATE | FATE-master/python/fate_test/fate_test/_io.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
import loguru
from fate_test._ascii import HEAD, TAIL, BENCHMARK
# noinspection PyPep8Naming
class echo(object):
_file = None
@classmethod
def set_file(cls, file):
cls._file = file
@classmethod
def echo(cls, message, **kwargs):
click.secho(message, **kwargs)
click.secho(message, file=cls._file, **kwargs)
@classmethod
def file(cls, message, **kwargs):
click.secho(message, file=cls._file, **kwargs)
@classmethod
def stdout(cls, message, **kwargs):
click.secho(message, **kwargs)
@classmethod
def stdout_newline(cls):
click.secho("")
@classmethod
def welcome(cls, banner_type="testsuite"):
if banner_type == "testsuite":
cls.echo(HEAD)
elif banner_type == "benchmark":
cls.echo(BENCHMARK)
@classmethod
def farewell(cls):
cls.echo(TAIL)
@classmethod
def flush(cls):
import sys
sys.stdout.flush()
def set_logger(name):
loguru.logger.remove()
loguru.logger.add(name, level='ERROR', delay=True)
return loguru.logger
LOGGER = loguru.logger
| 1,771 | 23.957746 | 75 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/_config.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
import typing
from collections import namedtuple
from pathlib import Path
from ruamel import yaml
template = """\
# base dir for data upload conf eg, data_base_dir={FATE}
# examples/data/breast_hetero_guest.csv -> $data_base_dir/examples/data/breast_hetero_guest.csv
data_base_dir: path(FATE)
# directory dedicated to fate_test job file storage, default cache location={FATE}/examples/cache/
cache_directory: examples/cache/
# directory stores performance benchmark suites, default location={FATE}/examples/benchmark_performance
performance_template_directory: examples/benchmark_performance/
# directory stores flow test config, default location={FATE}/examples/flow_test_template/hetero_lr/flow_test_config.yaml
flow_test_config_directory: examples/flow_test_template/hetero_lr/flow_test_config.yaml
# directory stores testsuite file with min_test data sets to upload,
# default location={FATE}/examples/data/upload_config/min_test_data_testsuite.json
min_test_data_config: examples/data/upload_config/min_test_data_testsuite.json
# directory stores testsuite file with all example data sets to upload,
# default location={FATE}/examples/data/upload_config/all_examples_data_testsuite.json
all_examples_data_config: examples/data/upload_config/all_examples_data_testsuite.json
# directory where FATE code is located, default installation location={FATE}/fate
# python/federatedml -> $fate_base/python/federatedml
fate_base: path(FATE)/fate
# whether to delete data in suites after all jobs done
clean_data: true
# participating parties' id and corresponding flow service ip & port information
parties:
guest: [9999]
host: [10000, 9999]
arbiter: [10000]
services:
- flow_services:
- {address: 127.0.0.1:9380, parties: [9999, 10000]}
serving_setting:
address: 127.0.0.1:8059
ssh_tunnel: # optional
enable: false
ssh_address: <remote ip>:<remote port>
ssh_username:
ssh_password: # optional
ssh_priv_key: "~/.ssh/id_rsa"
# what is ssh_tunnel?
# to open the ssh tunnel(s) if the remote service
# cannot be accessed directly from the location where the test suite is run!
#
# +---------------------+
# | ssh address |
# | ssh username |
# | ssh password/ |
# +--------+ | ssh priv_key | +----------------+
# |local ip+----------ssh tuunel-------------->+remote local ip |
# +--------+ | | +----------------+
# | |
# request local ip:port +----- as if --------->request remote's local ip:port from remote side
# | |
# | |
# +---------------------+
#
"""
data_base_dir = Path(__file__).resolve().parents[3]
if (data_base_dir / 'examples').is_dir():
template = template.replace('path(FATE)', str(data_base_dir))
_default_config = Path(__file__).resolve().parent / 'fate_test_config.yaml'
data_switch = None
use_local_data = 1
data_alter = dict()
deps_alter = dict()
jobs_num = 0
jobs_progress = 0
non_success_jobs = []
def create_config(path: Path, override=False):
if path.exists() and not override:
raise FileExistsError(f"{path} exists")
with path.open("w") as f:
f.write(template)
def default_config():
if not _default_config.exists():
create_config(_default_config)
return _default_config
class Parties(object):
def __init__(self, **kwargs):
"""
        role-to-parties mapping; typically accepts guest, host and arbiter
"""
self._role_to_parties = kwargs
self._party_to_role_string = {}
for role in kwargs:
parties = kwargs[role]
setattr(self, role, parties)
for i, party in enumerate(parties):
if party not in self._party_to_role_string:
self._party_to_role_string[party] = set()
self._party_to_role_string[party].add(f"{role.lower()}_{i}")
@staticmethod
def from_dict(d: typing.MutableMapping[str, typing.List[int]]):
return Parties(**d)
def party_to_role_string(self, party):
return self._party_to_role_string[party]
def extract_role(self, counts: typing.MutableMapping[str, int]):
roles = {}
for role, num in counts.items():
if role not in self._role_to_parties and num > 0:
raise ValueError(f"{role} not found in config")
else:
if len(self._role_to_parties[role]) < num:
raise ValueError(f"require {num} {role} parties, only {len(self._role_to_parties[role])} in config")
roles[role] = self._role_to_parties[role][:num]
return roles
def extract_initiator_role(self, role):
initiator_role = role.strip()
if len(self._role_to_parties[initiator_role]) < 1:
raise ValueError(f"role {initiator_role} has empty party list")
party_id = self._role_to_parties[initiator_role][0]
return dict(role=initiator_role, party_id=party_id)
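def _parties_usage_sketch():
    # Illustrative only (not part of the original source): how Parties maps party ids to role
    # strings and extracts role assignments; the party ids below are assumptions.
    parties = Parties.from_dict({"guest": [9999], "host": [10000, 9998]})
    assert parties.party_to_role_string(9998) == {"host_1"}
    assert parties.extract_role({"guest": 1, "host": 2}) == {"guest": [9999], "host": [10000, 9998]}
    assert parties.extract_initiator_role("guest") == {"role": "guest", "party_id": 9999}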
class Config(object):
service = namedtuple("service", ["address"])
tunnel_service = namedtuple("tunnel_service", ["tunnel_id", "index"])
tunnel = namedtuple("tunnel", ["ssh_address", "ssh_username", "ssh_password", "ssh_priv_key", "services_address"])
def __init__(self, config):
self.data_base_dir = config["data_base_dir"]
self.cache_directory = os.path.join(config["data_base_dir"], config["cache_directory"])
self.perf_template_dir = os.path.join(config["data_base_dir"], config["performance_template_directory"])
self.flow_test_config_dir = os.path.join(config["data_base_dir"], config["flow_test_config_directory"])
self.min_test_data_config = os.path.join(config["data_base_dir"], config["min_test_data_config"])
self.all_examples_data_config = os.path.join(config["data_base_dir"], config["all_examples_data_config"])
self.fate_base = config["fate_base"]
self.clean_data = config.get("clean_data", True)
self.parties = Parties.from_dict(config["parties"])
self.role = config["parties"]
self.serving_setting = config["services"][0]
self.party_to_service_id = {}
self.service_id_to_service = {}
self.tunnel_id_to_tunnel = {}
self.extend_sid = None
self.auto_increasing_sid = None
tunnel_id = 0
service_id = 0
os.makedirs(os.path.dirname(self.cache_directory), exist_ok=True)
for service_config in config["services"]:
flow_services = service_config["flow_services"]
if service_config.get("ssh_tunnel", {}).get("enable", False):
tunnel_id += 1
services_address = []
for index, flow_service in enumerate(flow_services):
service_id += 1
address_host, address_port = flow_service["address"].split(":")
address_port = int(address_port)
services_address.append((address_host, address_port))
self.service_id_to_service[service_id] = self.tunnel_service(tunnel_id, index)
for party in flow_service["parties"]:
self.party_to_service_id[party] = service_id
tunnel_config = service_config["ssh_tunnel"]
ssh_address_host, ssh_address_port = tunnel_config["ssh_address"].split(":")
self.tunnel_id_to_tunnel[tunnel_id] = self.tunnel((ssh_address_host, int(ssh_address_port)),
tunnel_config["ssh_username"],
tunnel_config["ssh_password"],
tunnel_config["ssh_priv_key"],
services_address)
else:
for flow_service in flow_services:
service_id += 1
address = flow_service["address"]
self.service_id_to_service[service_id] = self.service(address)
for party in flow_service["parties"]:
self.party_to_service_id[party] = service_id
@staticmethod
def load(path: typing.Union[str, Path], **kwargs):
if isinstance(path, str):
path = Path(path)
config = {}
if path is not None:
with path.open("r") as f:
config.update(yaml.safe_load(f))
if config["data_base_dir"] == "path(FATE)":
raise ValueError("Invalid 'data_base_dir'.")
config["data_base_dir"] = path.resolve().joinpath(config["data_base_dir"]).resolve()
config.update(kwargs)
return Config(config)
@staticmethod
def load_from_file(path: typing.Union[str, Path]):
"""
Loads conf content from json or yaml file. Used to read in parameter configuration
Parameters
----------
path: str, path to conf file, should be absolute path
Returns
-------
dict, parameter configuration in dictionary format
"""
if isinstance(path, str):
path = Path(path)
config = {}
if path is not None:
file_type = path.suffix
with path.open("r") as f:
if file_type == ".yaml":
config.update(yaml.safe_load(f))
elif file_type == ".json":
config.update(json.load(f))
else:
raise ValueError(f"Cannot load conf from file type {file_type}")
return config
def parse_config(config):
try:
config_inst = Config.load(config)
except Exception as e:
raise RuntimeError(f"error parse config from {config}") from e
return config_inst
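def _config_usage_sketch():
    # Illustrative only (not part of the original source): create the default config file if it
    # does not exist yet, parse it, and look up the initiator party. This assumes the template's
    # data_base_dir placeholder has already been substituted with the real FATE path.
    config_inst = parse_config(default_config())
    return config_inst.parties.extract_initiator_role("guest")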
| 10,690 | 39.343396 | 120 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/utils.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
from colorama import init, deinit, Fore, Style
import math
import numpy as np
from fate_test._io import echo
from prettytable import PrettyTable, ORGMODE
SCRIPT_METRICS = "script_metrics"
DISTRIBUTION_METRICS = "distribution_metrics"
ALL = "all"
RELATIVE = "relative"
ABSOLUTE = "absolute"
class TxtStyle:
TRUE_VAL = Fore.GREEN
FALSE_VAL = Fore.RED + Style.BRIGHT
TITLE = Fore.BLUE
FIELD_VAL = Fore.YELLOW
DATA_FIELD_VAL = Fore.CYAN
END = Style.RESET_ALL
def show_data(data):
data_table = PrettyTable()
data_table.set_style(ORGMODE)
data_table.field_names = ["Data", "Information"]
for name, table_name in data.items():
row = [name, f"{TxtStyle.DATA_FIELD_VAL}{table_name}{TxtStyle.END}"]
data_table.add_row(row)
echo.echo(data_table.get_string(title=f"{TxtStyle.TITLE}Data Summary{TxtStyle.END}"))
echo.echo("\n")
def _get_common_metrics(**results):
common_metrics = None
for result in results.values():
if common_metrics is None:
common_metrics = set(result.keys())
else:
common_metrics = common_metrics & result.keys()
if SCRIPT_METRICS in common_metrics:
common_metrics.remove(SCRIPT_METRICS)
return list(common_metrics)
def _filter_results(metrics, **results):
filtered_results = {}
for model_name, result in results.items():
model_result = [result.get(metric, None) for metric in metrics]
if None in model_result:
continue
filtered_results[model_name] = model_result
return filtered_results
def style_table(txt):
colored_txt = txt.replace("True", f"{TxtStyle.TRUE_VAL}True{TxtStyle.END}")
colored_txt = colored_txt.replace("False", f"{TxtStyle.FALSE_VAL}False{TxtStyle.END}")
return colored_txt
def evaluate_almost_equal(metrics, results, abs_tol=None, rel_tol=None):
"""
Evaluate for each given metric if values in results are almost equal
Parameters
----------
metrics: List[str], metrics names
results: dict, results to be evaluated
abs_tol: float, absolute error tolerance
rel_tol: float, relative difference tolerance
Returns
-------
    tuple of (dict, bool), per-metric equality summary and whether all metrics are almost equal
    """
    # return an empty summary if no metrics are given
    if len(metrics) == 0:
        return {}, False
eval_summary = {}
for i, metric in enumerate(metrics):
v_eval = [res[i] for res in results.values()]
first_v = v_eval[0]
if metric == SCRIPT_METRICS:
continue
if abs_tol is not None and rel_tol is not None:
eval_summary[metric] = all(math.isclose(v, first_v, abs_tol=abs_tol, rel_tol=rel_tol) for v in v_eval)
elif abs_tol is not None:
eval_summary[metric] = all(math.isclose(v, first_v, abs_tol=abs_tol) for v in v_eval)
elif rel_tol is not None:
eval_summary[metric] = all(math.isclose(v, first_v, rel_tol=rel_tol) for v in v_eval)
else:
eval_summary[metric] = all(math.isclose(v, first_v) for v in v_eval)
all_match = all(eval_summary.values())
return eval_summary, all_match
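def _evaluate_almost_equal_sketch():
    # Illustrative only (not part of the original source): two models compared on one metric,
    # using the {model_name: [metric values]} layout produced by _filter_results.
    metrics = ["auc"]
    results = {"FATE": [0.98501], "local": [0.98502]}
    summary, all_match = evaluate_almost_equal(metrics, results, abs_tol=1e-3)
    assert summary == {"auc": True} and all_match is True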
def _distribution_metrics(**results):
filtered_metric_group = _filter_results([DISTRIBUTION_METRICS], **results)
for script, model_results_pair in filtered_metric_group.items():
metric_results = model_results_pair[0]
common_metrics = _get_common_metrics(**metric_results)
filtered_results = _filter_results(common_metrics, **metric_results)
table = PrettyTable()
table.set_style(ORGMODE)
script_model_names = list(filtered_results.keys())
table.field_names = ["Script Model Name"] + common_metrics
for script_model_name in script_model_names:
row = [f"{script}-{script_model_name}"] + [f"{TxtStyle.FIELD_VAL}{v}{TxtStyle.END}" for v in
filtered_results[script_model_name]]
table.add_row(row)
echo.echo(table.get_string(title=f"{TxtStyle.TITLE}{script} distribution metrics{TxtStyle.END}"))
echo.echo("\n" + "#" * 60)
def match_script_metrics(abs_tol, rel_tol, match_details, **results):
filtered_metric_group = _filter_results([SCRIPT_METRICS], **results)
for script, model_results_pair in filtered_metric_group.items():
metric_results = model_results_pair[0]
common_metrics = _get_common_metrics(**metric_results)
filtered_results = _filter_results(common_metrics, **metric_results)
table = PrettyTable()
table.set_style(ORGMODE)
script_model_names = list(filtered_results.keys())
table.field_names = ["Script Model Name"] + common_metrics
for script_model_name in script_model_names:
row = [f"{script_model_name}-{script}"] + [f"{TxtStyle.FIELD_VAL}{v}{TxtStyle.END}" for v in
filtered_results[script_model_name]]
table.add_row(row)
echo.echo(table.get_string(title=f"{TxtStyle.TITLE}{script} Script Metrics Summary{TxtStyle.END}"))
_all_match(common_metrics, filtered_results, abs_tol, rel_tol, script, match_details=match_details)
def match_metrics(evaluate, group_name, abs_tol=None, rel_tol=None, storage_tag=None, history_tag=None,
fate_version=None, cache_directory=None, match_details=None, **results):
"""
    Display and compare metrics across models; optionally store metrics or match against history
Parameters
----------
    evaluate: bool, whether to evaluate if metrics are almost equal, and include comparison results in output report
group_name: str, group name of all models
abs_tol: float, max tolerance of absolute error to consider two metrics to be almost equal
rel_tol: float, max tolerance of relative difference to consider two metrics to be almost equal
storage_tag: str, metrics information storage tag
history_tag: str, historical metrics information comparison tag
fate_version: str, FATE version
cache_directory: str, Storage path of metrics information
match_details: str, Error value display in algorithm comparison
results: dict of model name: metrics
Returns
-------
match result
"""
init(autoreset=True)
common_metrics = _get_common_metrics(**results)
filtered_results = _filter_results(common_metrics, **results)
table = PrettyTable()
table.set_style(ORGMODE)
model_names = list(filtered_results.keys())
table.field_names = ["Model Name"] + common_metrics
for model_name in model_names:
row = [f"{model_name}-{group_name}"] + [f"{TxtStyle.FIELD_VAL}{v}{TxtStyle.END}" for v in
filtered_results[model_name]]
table.add_row(row)
echo.echo(table.get_string(title=f"{TxtStyle.TITLE}Metrics Summary{TxtStyle.END}"))
if evaluate and len(filtered_results.keys()) > 1:
_all_match(common_metrics, filtered_results, abs_tol, rel_tol, match_details=match_details)
_distribution_metrics(**results)
match_script_metrics(abs_tol, rel_tol, match_details, **results)
if history_tag:
history_tag = ["_".join([i, group_name]) for i in history_tag]
comparison_quality(group_name, history_tag, cache_directory, abs_tol, rel_tol, match_details, **results)
if storage_tag:
storage_tag = "_".join(['FATE', fate_version, storage_tag, group_name])
_save_quality(storage_tag, cache_directory, **results)
deinit()
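def _match_metrics_sketch():
    # Illustrative call only (not part of the original source): compare one FATE job against a
    # local baseline; the group name, tolerances and metric values are assumptions.
    match_metrics(evaluate=True, group_name="hetero_lr-binary-0", abs_tol=1e-4, rel_tol=1e-3,
                  match_details=ALL,
                  FATE={"auc": 0.98, "ks": 0.89},
                  local={"auc": 0.98, "ks": 0.89})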
def _match_error(metrics, results):
relative_error_list = []
absolute_error_list = []
    if len(metrics) == 0:
        return [], []
for i, v in enumerate(metrics):
v_eval = [res[i] for res in results.values()]
absolute_error_list.append(f"{TxtStyle.FIELD_VAL}{abs(max(v_eval) - min(v_eval))}{TxtStyle.END}")
relative_error_list.append(
f"{TxtStyle.FIELD_VAL}{abs((max(v_eval) - min(v_eval)) / max(v_eval))}{TxtStyle.END}")
return relative_error_list, absolute_error_list
def _all_match(common_metrics, filtered_results, abs_tol, rel_tol, script=None, match_details=None):
eval_summary, all_match = evaluate_almost_equal(common_metrics, filtered_results, abs_tol, rel_tol)
eval_table = PrettyTable()
eval_table.set_style(ORGMODE)
field_names = ["Metric", "All Match"]
relative_error_list, absolute_error_list = _match_error(common_metrics, filtered_results)
for i, metric in enumerate(eval_summary.keys()):
row = [metric, eval_summary.get(metric)]
if match_details == ALL:
field_names = ["Metric", "All Match", "max_relative_error", "max_absolute_error"]
row += [relative_error_list[i], absolute_error_list[i]]
elif match_details == RELATIVE:
field_names = ["Metric", "All Match", "max_relative_error"]
row += [relative_error_list[i]]
elif match_details == ABSOLUTE:
field_names = ["Metric", "All Match", "max_absolute_error"]
row += [absolute_error_list[i]]
eval_table.add_row(row)
eval_table.field_names = field_names
echo.echo(style_table(eval_table.get_string(title=f"{TxtStyle.TITLE}Match Results{TxtStyle.END}")))
script = "" if script is None else f"{script} "
if all_match:
echo.echo(f"All {script}Metrics Match: {TxtStyle.TRUE_VAL}{all_match}{TxtStyle.END}")
else:
echo.echo(f"All {script}Metrics Match: {TxtStyle.FALSE_VAL}{all_match}{TxtStyle.END}")
def comparison_quality(group_name, history_tags, cache_directory, abs_tol, rel_tol, match_details, **results):
def regression_group(results_dict):
metric = {}
for k, v in results_dict.items():
if not isinstance(v, dict):
metric[k] = v
return metric
def class_group(class_dict):
metric = {}
for k, v in class_dict.items():
if not isinstance(v, dict):
metric[k] = v
for k, v in class_dict['distribution_metrics'].items():
metric.update(v)
return metric
history_info_dir = "/".join([os.path.join(os.path.abspath(cache_directory), 'benchmark_history',
"benchmark_quality.json")])
    assert os.path.exists(history_info_dir), f"history file {history_info_dir} not found, please check whether it has been deleted"
with open(history_info_dir, 'r') as f:
benchmark_quality = json.load(f, object_hook=dict)
regression_metric = {}
regression_quality = {}
class_quality = {}
for history_tag in history_tags:
for tag in benchmark_quality:
if '_'.join(tag.split("_")[2:]) == history_tag and SCRIPT_METRICS in results["FATE"]:
regression_metric[tag] = regression_group(benchmark_quality[tag]['FATE'])
for key, value in _filter_results([SCRIPT_METRICS], **benchmark_quality[tag])['FATE'][0].items():
regression_quality["_".join([tag, key])] = value
elif '_'.join(tag.split("_")[2:]) == history_tag and DISTRIBUTION_METRICS in results["FATE"]:
class_quality[tag] = class_group(benchmark_quality[tag]['FATE'])
if SCRIPT_METRICS in results["FATE"] and regression_metric:
regression_metric[group_name] = regression_group(results['FATE'])
metric_compare(abs_tol, rel_tol, match_details, **regression_metric)
for key, value in _filter_results([SCRIPT_METRICS], **results)['FATE'][0].items():
regression_quality["_".join([group_name, key])] = value
metric_compare(abs_tol, rel_tol, match_details, **regression_quality)
echo.echo("\n" + "#" * 60)
elif DISTRIBUTION_METRICS in results["FATE"] and class_quality:
class_quality[group_name] = class_group(results['FATE'])
metric_compare(abs_tol, rel_tol, match_details, **class_quality)
echo.echo("\n" + "#" * 60)
def metric_compare(abs_tol, rel_tol, match_details, **metric_results):
common_metrics = _get_common_metrics(**metric_results)
filtered_results = _filter_results(common_metrics, **metric_results)
table = PrettyTable()
table.set_style(ORGMODE)
script_model_names = list(filtered_results.keys())
table.field_names = ["Script Model Name"] + common_metrics
for script_model_name in script_model_names:
table.add_row([f"{script_model_name}"] +
[f"{TxtStyle.FIELD_VAL}{v}{TxtStyle.END}" for v in filtered_results[script_model_name]])
print(
table.get_string(title=f"{TxtStyle.TITLE}Comparison results of all metrics of Script Model FATE{TxtStyle.END}"))
_all_match(common_metrics, filtered_results, abs_tol, rel_tol, match_details=match_details)
def _save_quality(storage_tag, cache_directory, **results):
save_dir = "/".join([os.path.join(os.path.abspath(cache_directory), 'benchmark_history', "benchmark_quality.json")])
os.makedirs(os.path.dirname(save_dir), exist_ok=True)
if os.path.exists(save_dir):
with open(save_dir, 'r') as f:
benchmark_quality = json.load(f, object_hook=dict)
else:
benchmark_quality = {}
if storage_tag in benchmark_quality:
print("This tag already exists in the history and will be updated to the record information.")
benchmark_quality.update({storage_tag: results})
try:
with open(save_dir, 'w') as fp:
json.dump(benchmark_quality, fp, indent=2)
print("Storage success, please check: ", save_dir)
except Exception:
print("Storage failed, please check: ", save_dir)
def parse_summary_result(rs_dict):
for model_key in rs_dict:
rs_content = rs_dict[model_key]
if 'validate' in rs_content:
return rs_content['validate']
else:
return rs_content['train']
def extract_data(df, col_name, convert_float=True, keep_id=False):
"""
component output data to numpy array
Parameters
----------
df: dataframe
col_name: column to extract
convert_float: whether to convert extracted value to float value
keep_id: whether to keep id
Returns
-------
array of extracted data, optionally with id
"""
if keep_id:
if convert_float:
df[col_name] = df[col_name].to_numpy().astype(np.float64)
return df[[df.columns[0], col_name]].to_numpy()
else:
return df[col_name].to_numpy().astype(np.float64)
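def _extract_data_sketch():
    # Illustrative only (not part of the original source): pull a score column out of a
    # component-output dataframe; pandas is assumed to be available and the column names
    # below are assumptions.
    import pandas as pd
    df = pd.DataFrame({"id": [1, 2], "predict_score": ["0.1", "0.9"]})
    scores = extract_data(df, "predict_score")                  # array([0.1, 0.9])
    with_id = extract_data(df, "predict_score", keep_id=True)   # [[1, 0.1], [2, 0.9]]
    return scores, with_id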
| 15,028 | 42.063037 | 120 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/_flow_client.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
import time
import typing
from datetime import timedelta
from pathlib import Path
import requests
from fate_test._parser import Data, Job
from flow_sdk.client import FlowClient
from fate_test import _config
class FLOWClient(object):
def __init__(self,
address: typing.Optional[str],
data_base_dir: typing.Optional[Path],
cache_directory: typing.Optional[Path]):
self.address = address
self.version = "v1"
self._http = requests.Session()
self._data_base_dir = data_base_dir
self._cache_directory = cache_directory
self.data_size = 0
def set_address(self, address):
self.address = address
def upload_data(self, data: Data, callback=None, output_path=None):
try:
response, data_path, bind = self._upload_data(conf=data.config, output_path=output_path, verbose=0, drop=1)
if callback is not None:
callback(response)
if not bind:
status = self._awaiting(response.job_id, "local")
status = str(status).lower()
else:
status = response["retmsg"]
except Exception as e:
raise RuntimeError(f"upload data failed") from e
return status, data_path
def delete_data(self, data: Data):
try:
table_name = data.config['table_name'] if data.config.get(
'table_name', None) is not None else data.config.get('name')
self._delete_data(table_name=table_name, namespace=data.config['namespace'])
except Exception as e:
raise RuntimeError(f"delete data failed") from e
def submit_job(self, job: Job, callback=None) -> 'SubmitJobResponse':
try:
response = self._submit_job(**job.submit_params)
if callback is not None:
callback(response)
status = self._awaiting(response.job_id, "guest", callback)
response.status = status
except Exception as e:
raise RuntimeError(f"submit job failed") from e
return response
def deploy_model(self, model_id, model_version, dsl=None):
result = self._deploy_model(model_id=model_id, model_version=model_version, dsl=dsl)
return result
def output_data_table(self, job_id, role, party_id, component_name):
result = self._output_data_table(job_id=job_id, role=role, party_id=party_id, component_name=component_name)
return result
def table_info(self, table_name, namespace):
result = self._table_info(table_name=table_name, namespace=namespace)
return result
def add_notes(self, job_id, role, party_id, notes):
self._add_notes(job_id=job_id, role=role, party_id=party_id, notes=notes)
def check_connection(self):
try:
version = self._http.request(method="POST", url=f"{self._base}version/get", json={"module": "FATE"},
timeout=2).json()
except Exception:
import traceback
traceback.print_exc()
raise
fate_version = version.get("data", {}).get("FATE")
if fate_version:
return fate_version, self.address
raise EnvironmentError(f"connection not ok")
def _awaiting(self, job_id, role, callback=None):
while True:
response = self._query_job(job_id, role=role)
if response.status.is_done():
return response.status
if callback is not None:
callback(response)
time.sleep(1)
def _save_json(self, file, file_name):
"""
file = json.dumps(file, indent=4)
file_path = os.path.join(str(self._cache_directory), file_name)
try:
with open(file_path, "w", encoding='utf-8') as f:
f.write(file)
except Exception as e:
raise Exception(f"write error==>{e}")
return file_path
"""
return file
def _upload_data(self, conf, output_path=None, verbose=0, drop=1):
if conf.get("engine", {}) != "PATH":
if output_path is not None:
conf['file'] = os.path.join(os.path.abspath(output_path), os.path.basename(conf.get('file')))
else:
if _config.data_switch is not None:
conf['file'] = os.path.join(str(self._cache_directory), os.path.basename(conf.get('file')))
else:
conf['file'] = os.path.join(str(self._data_base_dir), conf.get('file'))
path = Path(conf.get('file'))
if not path.exists():
                raise Exception('The file should exist on the fate flow client machine, but it was not found, '
                                f'please check the path: {path}')
upload_response = self.flow_client(request='data/upload', param=self._save_json(conf, 'upload_conf.json'),
verbose=verbose, drop=drop)
response = UploadDataResponse(upload_response)
return response, conf['file'], False
else:
if _config.data_switch is not None:
conf['address']['path'] = os.path.join(str(self._cache_directory), conf['address']['path'])
else:
conf['address']['path'] = os.path.join(str(self._data_base_dir), conf['address']['path'])
conf['drop'] = drop
del conf["extend_sid"]
del conf["auto_increasing_sid"]
del conf["use_local_data"]
            path = Path(conf.get('address').get('path'))
            if not path.exists():
                raise Exception('The file should exist on the fate flow client machine, but it was not found, '
                                f'please check the path: {path}')
            # bind the table only after the path check passes
            response = self._table_bind(conf)
return response, None, True
def _table_info(self, table_name, namespace):
param = {
'table_name': table_name,
'namespace': namespace
}
response = self.flow_client(request='table/info', param=param)
return response
def _delete_data(self, table_name, namespace):
param = {
'table_name': table_name,
'namespace': namespace
}
response = self.flow_client(request='table/delete', param=param)
return response
def _submit_job(self, conf, dsl):
param = {
'job_dsl': self._save_json(dsl, 'submit_dsl.json'),
'job_runtime_conf': self._save_json(conf, 'submit_conf.json')
}
response = SubmitJobResponse(self.flow_client(request='job/submit', param=param))
return response
def _deploy_model(self, model_id, model_version, dsl=None):
post_data = {'model_id': model_id,
'model_version': model_version,
'predict_dsl': dsl}
response = self.flow_client(request='model/deploy', param=post_data)
result = {}
try:
retcode = response['retcode']
retmsg = response['retmsg']
if retcode != 0 or retmsg != 'success':
raise RuntimeError(f"deploy model error: {response}")
result["model_id"] = response["data"]["model_id"]
result["model_version"] = response["data"]["model_version"]
except Exception as e:
raise RuntimeError(f"deploy model error: {response}") from e
return result
def _output_data_table(self, job_id, role, party_id, component_name):
post_data = {'job_id': job_id,
'role': role,
'party_id': party_id,
'component_name': component_name}
response = self.flow_client(request='component/output_data_table', param=post_data)
result = {}
try:
retcode = response['retcode']
retmsg = response['retmsg']
if retcode != 0 or retmsg != 'success':
raise RuntimeError(f"deploy model error: {response}")
result["name"] = response["data"][0]["table_name"]
result["namespace"] = response["data"][0]["table_namespace"]
except Exception as e:
raise RuntimeError(f"output data table error: {response}") from e
return result
def _get_summary(self, job_id, role, party_id, component_name):
post_data = {'job_id': job_id,
'role': role,
'party_id': party_id,
'component_name': component_name}
response = self.flow_client(request='component/get_summary', param=post_data)
try:
retcode = response['retcode']
retmsg = response['retmsg']
result = {}
if retcode != 0 or retmsg != 'success':
raise RuntimeError(f"deploy model error: {response}")
result["summary_dir"] = retmsg # 获取summary文件位置
except Exception as e:
raise RuntimeError(f"output data table error: {response}") from e
return result
def _query_job(self, job_id, role):
param = {
'job_id': job_id,
'role': role
}
response = QueryJobResponse(self.flow_client(request='job/query', param=param))
return response
def get_version(self):
response = self._post(url='version/get', json={"module": "FATE"})
try:
retcode = response['retcode']
retmsg = response['retmsg']
if retcode != 0 or retmsg != 'success':
raise RuntimeError(f"get version error: {response}")
fate_version = response["data"]["FATE"]
except Exception as e:
raise RuntimeError(f"get version error: {response}") from e
return fate_version
def _add_notes(self, job_id, role, party_id, notes):
data = dict(job_id=job_id, role=role, party_id=party_id, notes=notes)
response = AddNotesResponse(self._post(url='job/update', json=data))
return response
def _table_bind(self, data):
response = self._post(url='table/bind', json=data)
try:
retcode = response['retcode']
retmsg = response['retmsg']
if retcode != 0 or retmsg != 'success':
raise RuntimeError(f"table bind error: {response}")
except Exception as e:
raise RuntimeError(f"table bind error: {response}") from e
return response
@property
def _base(self):
return f"http://{self.address}/{self.version}/"
def _post(self, url, **kwargs) -> dict:
request_url = self._base + url
try:
response = self._http.request(method='post', url=request_url, **kwargs)
except Exception as e:
raise RuntimeError(f"post {url} with {kwargs} failed") from e
try:
if isinstance(response, requests.models.Response):
response = response.json()
else:
try:
response = json.loads(response.content.decode('utf-8', 'ignore'), strict=False)
except (TypeError, ValueError):
return response
except json.decoder.JSONDecodeError:
response = {'retcode': 100,
'retmsg': "Internal server error. Nothing in response. You may check out the configuration in "
"'FATE/conf/service_conf.yaml' and restart fate flow server."}
return response
def flow_client(self, request, param, verbose=0, drop=0):
client = FlowClient(self.address.split(':')[0], self.address.split(':')[1], self.version)
if request == 'data/upload':
stdout = client.data.upload(config_data=param, verbose=verbose, drop=drop)
elif request == 'table/delete':
stdout = client.table.delete(table_name=param['table_name'], namespace=param['namespace'])
elif request == 'table/info':
stdout = client.table.info(table_name=param['table_name'], namespace=param['namespace'])
elif request == 'job/submit':
stdout = client.job.submit(config_data=param['job_runtime_conf'], dsl_data=param['job_dsl'])
elif request == 'job/query':
stdout = client.job.query(job_id=param['job_id'], role=param['role'])
elif request == 'model/deploy':
stdout = client.model.deploy(model_id=param['model_id'], model_version=param['model_version'],
predict_dsl=param['predict_dsl'])
elif request == 'component/output_data_table':
stdout = client.component.output_data_table(job_id=param['job_id'], role=param['role'],
party_id=param['party_id'],
component_name=param['component_name'])
elif request == 'component/get_summary':
stdout = client.component.get_summary(job_id=param['job_id'], role=param['role'],
party_id=param['party_id'],
component_name=param['component_name'])
else:
stdout = {"retcode": None}
status = stdout["retcode"]
if status != 0:
if request == 'table/delete' and stdout["retmsg"] == "no find table":
return stdout
raise ValueError({'retcode': 100, 'retmsg': stdout["retmsg"]})
return stdout
class Status(object):
def __init__(self, status: str):
self.status = status
def is_done(self):
return self.status.lower() in ['complete', 'success', 'canceled', 'failed', "timeout"]
def is_success(self):
return self.status.lower() in ['complete', 'success']
def __str__(self):
return self.status
def __repr__(self):
return self.__str__()
class QueryJobResponse(object):
def __init__(self, response: dict):
try:
status = Status(response.get('data')[0]["f_status"])
progress = response.get('data')[0]['f_progress']
except Exception as e:
raise RuntimeError(f"query job error, response: {response}") from e
self.status = status
self.progress = progress
class UploadDataResponse(object):
def __init__(self, response: dict):
try:
self.job_id = response["jobId"]
except Exception as e:
raise RuntimeError(f"upload error, response: {response}") from e
self.status: typing.Optional[Status] = None
class AddNotesResponse(object):
def __init__(self, response: dict):
try:
retcode = response['retcode']
retmsg = response['retmsg']
if retcode != 0 or retmsg != 'success':
raise RuntimeError(f"add notes error: {response}")
except Exception as e:
raise RuntimeError(f"add notes error: {response}") from e
class SubmitJobResponse(object):
def __init__(self, response: dict):
try:
self.job_id = response["jobId"]
self.model_info = response["data"]["model_info"]
except Exception as e:
raise RuntimeError(f"submit job error, response: {response}") from e
self.status: typing.Optional[Status] = None
class DataProgress(object):
def __init__(self, role_str):
self.role_str = role_str
self.start = time.time()
self.show_str = f"[{self.elapse()}] {self.role_str}"
self.job_id = ""
def elapse(self):
return f"{timedelta(seconds=int(time.time() - self.start))}"
def submitted(self, job_id):
self.job_id = job_id
self.show_str = f"[{self.elapse()}]{self.job_id} {self.role_str}"
def update(self):
self.show_str = f"[{self.elapse()}]{self.job_id} {self.role_str}"
def show(self):
return self.show_str
class JobProgress(object):
def __init__(self, name):
self.name = name
self.start = time.time()
self.show_str = f"[{self.elapse()}] {self.name}"
self.job_id = ""
self.progress_tracking = ""
def elapse(self):
return f"{timedelta(seconds=int(time.time() - self.start))}"
def set_progress_tracking(self, progress_tracking):
self.progress_tracking = progress_tracking + " "
def submitted(self, job_id):
self.job_id = job_id
self.show_str = f"{self.progress_tracking}[{self.elapse()}]{self.job_id} submitted {self.name}"
def running(self, status, progress):
if progress is None:
progress = 0
self.show_str = f"{self.progress_tracking}[{self.elapse()}]{self.job_id} {status} {progress:3}% {self.name}"
def exception(self, exception_id):
self.show_str = f"{self.progress_tracking}[{self.elapse()}]{self.name} exception({exception_id}): {self.job_id}"
def final(self, status):
self.show_str = f"{self.progress_tracking}[{self.elapse()}]{self.job_id} {status} {self.name}"
def show(self):
return self.show_str
| 17,903 | 38.964286 | 120 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/_ascii.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
HEAD = """\
████████╗███████╗███████╗████████╗███████╗██╗ ██╗██╗████████╗███████╗
╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝██╔════╝██║ ██║██║╚══██╔══╝██╔════╝
██║ █████╗ ███████╗ ██║ ███████╗██║ ██║██║ ██║ █████╗
██║ ██╔══╝ ╚════██║ ██║ ╚════██║██║ ██║██║ ██║ ██╔══╝
██║ ███████╗███████║ ██║ ███████║╚██████╔╝██║ ██║ ███████╗
╚═╝ ╚══════╝╚══════╝ ╚═╝ ╚══════╝ ╚═════╝ ╚═╝ ╚═╝ ╚══════╝
"""
BENCHMARK = """\
██████╗ ███████╗███╗ ██╗ ██████╗██╗ ██╗███╗ ███╗ █████╗ ██████╗ ██╗ ██╗
██╔══██╗██╔════╝████╗ ██║██╔════╝██║ ██║████╗ ████║██╔══██╗██╔══██╗██║ ██╔╝
██████╔╝█████╗ ██╔██╗ ██║██║ ███████║██╔████╔██║███████║██████╔╝█████╔╝
██╔══██╗██╔══╝ ██║╚██╗██║██║ ██╔══██║██║╚██╔╝██║██╔══██║██╔══██╗██╔═██╗
██████╔╝███████╗██║ ╚████║╚██████╗██║ ██║██║ ╚═╝ ██║██║ ██║██║ ██║██║ ██╗
╚═════╝ ╚══════╝╚═╝ ╚═══╝ ╚═════╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝
"""
TAIL = """\
██╗ ██╗ █████╗ ██╗ ██╗███████╗ ███████╗██╗ ██╗███╗ ██╗
██║ ██║██╔══██╗██║ ██║██╔════╝ ██╔════╝██║ ██║████╗ ██║
███████║███████║██║ ██║█████╗ █████╗ ██║ ██║██╔██╗ ██║
██╔══██║██╔══██║╚██╗ ██╔╝██╔══╝ ██╔══╝ ██║ ██║██║╚██╗██║
██║ ██║██║ ██║ ╚████╔╝ ███████╗ ██║ ╚██████╔╝██║ ╚████║
╚═╝ ╚═╝╚═╝ ╚═╝ ╚═══╝ ╚══════╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝
"""
| 1,987 | 38.76 | 77 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/_client.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sshtunnel
from fate_test._flow_client import FLOWClient
from fate_test._io import LOGGER
from fate_test._parser import Config
class Clients(object):
def __init__(self, config: Config):
self._flow_clients = {}
self._tunnel_id_to_flow_clients = {}
self._role_str_to_service_id = {}
self._tunnel_id_to_tunnel = config.tunnel_id_to_tunnel
for service_id, service in config.service_id_to_service.items():
if isinstance(service, Config.service):
self._flow_clients[service_id] = FLOWClient(
service.address, config.data_base_dir, config.cache_directory)
elif isinstance(service, Config.tunnel_service):
self._flow_clients[service_id] = FLOWClient(None, config.data_base_dir, config.cache_directory)
self._tunnel_id_to_flow_clients.setdefault(service.tunnel_id, []).append(
(service.index, self._flow_clients[service_id]))
for party, service_id in config.party_to_service_id.items():
for role_str in config.parties.party_to_role_string(party):
self._role_str_to_service_id[role_str] = service_id
def __getitem__(self, role_str: str) -> 'FLOWClient':
if role_str not in self._role_str_to_service_id:
raise RuntimeError(f"no flow client found binding to {role_str}")
return self._flow_clients[self._role_str_to_service_id[role_str]]
def __enter__(self):
# open ssh tunnels and create flow clients for remote
self._tunnels = []
for tunnel_id, tunnel_conf in self._tunnel_id_to_tunnel.items():
tunnel = sshtunnel.SSHTunnelForwarder(ssh_address_or_host=tunnel_conf.ssh_address,
ssh_username=tunnel_conf.ssh_username,
ssh_password=tunnel_conf.ssh_password,
ssh_pkey=tunnel_conf.ssh_priv_key,
remote_bind_addresses=tunnel_conf.services_address)
tunnel.start()
self._tunnels.append(tunnel)
for index, flow_client in self._tunnel_id_to_flow_clients[tunnel_id]:
flow_client.set_address(f"127.0.0.1:{tunnel.local_bind_ports[index]}")
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for tunnel in self._tunnels:
try:
tunnel.stop()
except Exception as e:
LOGGER.exception(e)
def contains(self, role_str):
return role_str in self._role_str_to_service_id
def all_roles(self):
return sorted(self._role_str_to_service_id.keys())
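def _clients_usage_sketch(config: Config):
    # Illustrative only (not part of the original source): open ssh tunnels when configured,
    # look up the flow client bound to guest_0 and check connectivity.
    with Clients(config) as clients:
        if clients.contains("guest_0"):
            version, address = clients["guest_0"].check_connection()
            LOGGER.info(f"flow {version} reachable at {address}")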
| 3,367 | 42.74026 | 111 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/scripts/benchmark_cli.py
|
import os
import re
import time
import uuid
from datetime import timedelta
from inspect import signature
import click
from fate_test._client import Clients
from fate_test._config import Config
from fate_test._io import LOGGER, echo
from fate_test._parser import BenchmarkSuite
from fate_test.scripts._options import SharedOptions
from fate_test.scripts._utils import _upload_data, _delete_data, _load_testsuites, _load_module_from_script
from fate_test.utils import show_data, match_metrics
DATA_DISPLAY_PATTERN = re.compile("^FATE")
@click.command(name="benchmark-quality")
@click.option('-i', '--include', required=True, type=click.Path(exists=True), multiple=True, metavar="<include>",
help="include *benchmark.json under these paths")
@click.option('-e', '--exclude', type=click.Path(exists=True), multiple=True,
help="exclude *benchmark.json under these paths")
@click.option('-g', '--glob', type=str,
help="glob string to filter sub-directory of path specified by <include>")
@click.option('-t', '--tol', type=float,
help="tolerance (absolute error) for metrics to be considered almost equal. "
"Comparison is done by evaluating abs(a-b) <= max(relative_tol * max(abs(a), abs(b)), absolute_tol)")
@click.option('-s', '--storage-tag', type=str,
help="tag for storing metrics, for future metrics info comparison")
@click.option('-v', '--history-tag', type=str, multiple=True,
help="Extract metrics info from history tags for comparison")
@click.option('-d', '--match-details', type=click.Choice(['all', 'relative', 'absolute', 'none']),
default="all", help="Error value display in algorithm comparison")
@click.option('--skip-data', is_flag=True, default=False,
help="skip uploading data specified in benchmark conf")
@click.option("--disable-clean-data", "clean_data", flag_value=False, default=None)
@click.option("--enable-clean-data", "clean_data", flag_value=True, default=None)
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def run_benchmark(ctx, include, exclude, glob, skip_data, tol, clean_data, storage_tag, history_tag, match_details,
**kwargs):
"""
process benchmark suite, alias: bq
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
if ctx.obj["extend_sid"] is not None:
config_inst.extend_sid = ctx.obj["extend_sid"]
if ctx.obj["auto_increasing_sid"] is not None:
config_inst.auto_increasing_sid = ctx.obj["auto_increasing_sid"]
if clean_data is None:
clean_data = config_inst.clean_data
data_namespace_mangling = ctx.obj["namespace_mangling"]
yes = ctx.obj["yes"]
echo.welcome("benchmark")
echo.echo(f"testsuite namespace: {namespace}", fg='red')
echo.echo("loading testsuites:")
suites = _load_testsuites(includes=include, excludes=exclude, glob=glob,
suffix="benchmark.json", suite_type="benchmark")
for suite in suites:
echo.echo(f"\tdataset({len(suite.dataset)}) benchmark groups({len(suite.pairs)}) {suite.path}")
if not yes and not click.confirm("running?"):
return
with Clients(config_inst) as client:
fate_version = client["guest_0"].get_version()
for i, suite in enumerate(suites):
# noinspection PyBroadException
try:
start = time.time()
echo.echo(f"[{i + 1}/{len(suites)}]start at {time.strftime('%Y-%m-%d %X')} {suite.path}", fg='red')
if not skip_data:
try:
_upload_data(client, suite, config_inst)
except Exception as e:
raise RuntimeError(f"exception occur while uploading data for {suite.path}") from e
try:
_run_benchmark_pairs(config_inst, suite, tol, namespace, data_namespace_mangling, storage_tag,
history_tag, fate_version, match_details)
except Exception as e:
raise RuntimeError(f"exception occur while running benchmark jobs for {suite.path}") from e
if not skip_data and clean_data:
_delete_data(client, suite)
echo.echo(f"[{i + 1}/{len(suites)}]elapse {timedelta(seconds=int(time.time() - start))}", fg='red')
except Exception:
exception_id = uuid.uuid1()
echo.echo(f"exception in {suite.path}, exception_id={exception_id}", err=True, fg='red')
LOGGER.exception(f"exception id: {exception_id}")
finally:
echo.stdout_newline()
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
@LOGGER.catch
def _run_benchmark_pairs(config: Config, suite: BenchmarkSuite, tol: float, namespace: str,
data_namespace_mangling: bool, storage_tag, history_tag, fate_version, match_details):
# pipeline demo goes here
pair_n = len(suite.pairs)
fate_base = config.fate_base
PYTHONPATH = os.environ.get('PYTHONPATH') + ":" + os.path.join(fate_base, "python")
os.environ['PYTHONPATH'] = PYTHONPATH
for i, pair in enumerate(suite.pairs):
echo.echo(f"Running [{i + 1}/{pair_n}] group: {pair.pair_name}")
results = {}
# data_summary = None
job_n = len(pair.jobs)
for j, job in enumerate(pair.jobs):
try:
echo.echo(f"Running [{j + 1}/{job_n}] job: {job.job_name}")
job_name, script_path, conf_path = job.job_name, job.script_path, job.conf_path
param = Config.load_from_file(conf_path)
mod = _load_module_from_script(script_path)
input_params = signature(mod.main).parameters
# local script
if len(input_params) == 1:
data, metric = mod.main(param=param)
elif len(input_params) == 2:
data, metric = mod.main(config=config, param=param)
# pipeline script
elif len(input_params) == 3:
if data_namespace_mangling:
data, metric = mod.main(config=config, param=param, namespace=f"_{namespace}")
else:
data, metric = mod.main(config=config, param=param)
else:
data, metric = mod.main()
results[job_name] = metric
echo.echo(f"[{j + 1}/{job_n}] job: {job.job_name} Success!\n")
if data and DATA_DISPLAY_PATTERN.match(job_name):
# data_summary = data
show_data(data)
# if data_summary is None:
# data_summary = data
except Exception as e:
exception_id = uuid.uuid1()
echo.echo(f"exception while running [{j + 1}/{job_n}] job, exception_id={exception_id}", err=True,
fg='red')
LOGGER.exception(f"exception id: {exception_id}, error message: \n{e}")
continue
rel_tol = pair.compare_setting.get("relative_tol")
# show_data(data_summary)
match_metrics(evaluate=True, group_name=pair.pair_name, abs_tol=tol, rel_tol=rel_tol,
storage_tag=storage_tag, history_tag=history_tag, fate_version=fate_version,
cache_directory=config.cache_directory, match_details=match_details, **results)
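def _example_benchmark_main(config=None, param=None, namespace=""):
    # Illustrative only (not part of the original source): the shape of the `main` callable that
    # _run_benchmark_pairs expects from a pipeline-style benchmark script. It receives the parsed
    # Config, the job parameters and a namespace suffix, and returns (data_summary, metrics);
    # the values below are placeholders.
    return {"train": "breast_hetero_guest"}, {"auc": 0.98, "ks": 0.89}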
| 7,654 | 49.361842 | 120 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/scripts/secure_protocol_cli.py
|
import click
import os
from fate_test._io import LOGGER, echo
from fate_test.scripts._options import SharedOptions
from fate_test.scripts.op_test.fate_he_performance_test import PaillierAssess
from fate_test.scripts.op_test.spdz_test import SPDZTest
@click.group(name="secure_protocol")
def secure_protocol_group():
"""
secureprotol test
"""
...
@secure_protocol_group.command("paillier")
@click.option("-round", "--test-round", type=int, help="", default=1)
@click.option("-num", "--data-num", type=int, help="", default=10000)
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def paillier_test(ctx, data_num, test_round, **kwargs):
"""
paillier
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
yes = ctx.obj["yes"]
echo.welcome()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
if not yes and not click.confirm("running?"):
return
for method in ["Paillier", "IPCL"]:
try:
assess_table = PaillierAssess(method=method, data_num=data_num, test_round=test_round)
except ValueError as e:
print(e, "\n")
continue
table = assess_table.output_table()
echo.echo(table)
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
@secure_protocol_group.command("spdz")
@click.option("-round", "--test-round", type=int, help="", default=1)
@click.option("-num", "--data-num", type=int, help="", default=10000)
@click.option("-partition", "--data-partition", type=int, help="", default=4)
@click.option("-lower_bound", "--data-lower-bound", type=int, help="", default=-1e9)
@click.option("-upper_bound", "--data-upper-bound", type=int, help="", default=1e9)
@click.option("-seed", "--seed", type=int, help="", default=123)
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def spdz_test(ctx, data_num, seed, data_partition, test_round,
data_lower_bound, data_upper_bound, **kwargs):
"""
spdz_test
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
yes = ctx.obj["yes"]
echo.welcome()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
if not yes and not click.confirm("running?"):
return
conf = ctx.obj["config"]
runtime_config_path_prefix = \
os.path.abspath(conf.fate_base) + "/python/fate_test/fate_test/scripts/op_test/spdz_conf/"
params = dict(data_num=data_num, seed=seed, data_partition=data_partition,
test_round=test_round, data_lower_bound=data_lower_bound,
data_upper_bound=data_upper_bound)
flow_address = None
for idx, address in enumerate(conf.serving_setting["flow_services"]):
if conf.role["guest"][0] in address["parties"]:
flow_address = address["address"]
spdz_test = SPDZTest(params=params,
conf_path=runtime_config_path_prefix + "job_conf.json",
dsl_path=runtime_config_path_prefix + "job_dsl.json",
flow_address=flow_address,
guest_party_id=[conf.role["guest"][0]],
host_party_id=[conf.role["host"][0]])
tables = spdz_test.run()
for table in tables:
echo.echo(table)
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
| 3,443 | 34.142857 | 98 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/scripts/quick_test_cli.py
|
import os
import subprocess
import click
from fate_test._config import Config
from fate_test._io import LOGGER, echo
from fate_test.scripts._options import SharedOptions
@click.group(name="unittest")
def unittest_group():
"""
unit test
"""
...
@unittest_group.command("federatedml")
@click.option('-i', '--include', type=click.Path(exists=True), multiple=True, metavar="<include>",
help="Specify federatedml test units for testing")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def unit_test(ctx, include, **kwargs):
"""
federatedml unit test
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
yes = ctx.obj["yes"]
echo.echo(f"testsuite namespace: {namespace}", fg='red')
if not yes and not click.confirm("running?"):
return
error_log_file = f"./logs/{namespace}/error_test.log"
os.makedirs(os.path.dirname(error_log_file), exist_ok=True)
run_test(includes=include, conf=config_inst, error_log_file=error_log_file)
def run_test(includes, conf: Config, error_log_file):
def error_log(stdout):
if stdout is None:
return os.path.abspath(error_log_file)
with open(error_log_file, "a") as f:
f.write(stdout)
def run_test(file):
global failed_count
echo.echo("start to run test {}".format(file))
try:
subp = subprocess.Popen(["python", file],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = stdout.decode("utf-8")
echo.echo(stdout)
if "FAILED" in stdout:
failed_count += 1
error_log(stdout=f"error sequence {failed_count}: {file}")
error_log(stdout=stdout)
except Exception:
return
def traverse_folder(file_fullname):
if os.path.isfile(file_fullname):
if "_test.py" in file_fullname and "ftl" not in file_fullname:
run_test(file_fullname)
else:
for file in os.listdir(file_fullname):
file_fullname_new = os.path.join(file_fullname, file)
if os.path.isdir(file_fullname_new):
traverse_folder(file_fullname_new)
if "_test.py" in file and ("/test" in file_fullname or "tests" in file_fullname):
if "ftl" in file_fullname_new:
continue
else:
run_test(file_fullname_new)
global failed_count
failed_count = 0
fate_base = conf.fate_base
ml_dir = os.path.join(fate_base, "python/federatedml")
PYTHONPATH = os.environ.get('PYTHONPATH') + ":" + os.path.join(fate_base, "python")
os.environ['PYTHONPATH'] = PYTHONPATH
if len(includes) == 0:
traverse_folder(ml_dir)
else:
ml_dir = includes
for v in ml_dir:
traverse_folder(os.path.abspath(v))
echo.echo(f"there are {failed_count} failed test")
if failed_count > 0:
print('Please check the error content: {}'.format(error_log(None)))
| 3,238 | 33.094737 | 98 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/scripts/_utils.py
|
import importlib
import os
import time
import uuid
import glob as glob_
from pathlib import Path
import click
from fate_test import _config
from fate_test._client import Clients
from fate_test._config import Config
from fate_test._flow_client import DataProgress, UploadDataResponse, QueryJobResponse
from fate_test._io import echo, LOGGER, set_logger
from fate_test._parser import Testsuite, BenchmarkSuite, DATA_JSON_HOOK, CONF_JSON_HOOK, DSL_JSON_HOOK
def _big_data_task(includes, guest_data_size, host_data_size, guest_feature_num, host_feature_num, host_data_type,
config_inst, encryption_type, match_rate, sparsity, force, split_host, output_path, parallelize):
from fate_test.scripts import generate_mock_data
def _find_testsuite_files(path):
suffix = ["testsuite.json", "benchmark.json"]
if isinstance(path, str):
path = Path(path)
if path.is_file():
if path.name.endswith(suffix[0]) or path.name.endswith(suffix[1]):
paths = [path]
else:
LOGGER.warning(f"{path} is file, but not end with `{suffix}`, skip")
paths = []
return [p.resolve() for p in paths]
else:
os.path.abspath(path)
paths = glob_.glob(f"{path}/*{suffix[0]}") + glob_.glob(f"{path}/*{suffix[1]}")
return [Path(p) for p in paths]
for include in includes:
if isinstance(include, str):
include_paths = Path(include)
include_paths = _find_testsuite_files(include_paths)
for include_path in include_paths:
generate_mock_data.get_big_data(guest_data_size, host_data_size, guest_feature_num, host_feature_num,
include_path, host_data_type, config_inst, encryption_type,
match_rate, sparsity, force, split_host, output_path, parallelize)
def _load_testsuites(includes, excludes, glob, provider=None, suffix="testsuite.json", suite_type="testsuite"):
def _find_testsuite_files(path):
if isinstance(path, str):
path = Path(path)
if path.is_file():
if path.name.endswith(suffix):
paths = [path]
else:
LOGGER.warning(f"{path} is file, but not end with `{suffix}`, skip")
paths = []
else:
paths = path.glob(f"**/*{suffix}")
return [p.resolve() for p in paths]
excludes_set = set()
for exclude in excludes:
excludes_set.update(_find_testsuite_files(exclude))
suite_paths = set()
for include in includes:
if isinstance(include, str):
include = Path(include)
# glob
if glob is not None and include.is_dir():
include_list = include.glob(glob)
else:
include_list = [include]
for include_path in include_list:
for suite_path in _find_testsuite_files(include_path):
if suite_path not in excludes_set:
suite_paths.add(suite_path)
suites = []
for suite_path in suite_paths:
try:
if suite_type == "testsuite":
suite = Testsuite.load(suite_path.resolve(), provider)
elif suite_type == "benchmark":
suite = BenchmarkSuite.load(suite_path.resolve())
else:
raise ValueError(f"Unsupported suite type: {suite_type}. Only accept type 'testsuite' or 'benchmark'.")
except Exception as e:
echo.stdout(f"load suite {suite_path} failed: {e}")
else:
suites.append(suite)
return suites
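# Illustrative usage (not part of the original module): callers such as the `suite`
# and `data` commands pass their CLI paths straight through, e.g.
#   suites = _load_testsuites(includes=include, excludes=exclude, glob=glob,
#                             suffix="benchmark.json", suite_type="benchmark")
# Any *testsuite.json / *benchmark.json found under an excluded path is dropped
# before the remaining suite files are parsed.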
@LOGGER.catch
def _upload_data(clients: Clients, suite, config: Config, output_path=None):
with click.progressbar(length=len(suite.dataset),
label="dataset",
show_eta=False,
show_pos=True,
width=24) as bar:
for i, data in enumerate(suite.dataset):
data.update(config)
table_name = data.config['table_name'] if data.config.get(
'table_name', None) is not None else data.config.get('name')
data_progress = DataProgress(f"{data.role_str}<-{data.config['namespace']}.{table_name}")
def update_bar(n_step):
bar.item_show_func = lambda x: data_progress.show()
time.sleep(0.1)
bar.update(n_step)
def _call_back(resp):
if isinstance(resp, UploadDataResponse):
data_progress.submitted(resp.job_id)
echo.file(f"[dataset]{resp.job_id}")
if isinstance(resp, QueryJobResponse):
data_progress.update()
update_bar(0)
try:
echo.stdout_newline()
status, data_path = clients[data.role_str].upload_data(data, _call_back, output_path)
time.sleep(1)
data_progress.update()
if status != 'success':
raise RuntimeError(f"uploading {i + 1}th data for {suite.path} {status}")
bar.update(1)
if _config.data_switch:
from fate_test.scripts import generate_mock_data
generate_mock_data.remove_file(data_path)
except Exception:
exception_id = str(uuid.uuid1())
echo.file(f"exception({exception_id})")
LOGGER.exception(f"exception id: {exception_id}")
echo.echo(f"upload {i + 1}th data {data.config} to {data.role_str} fail, exception_id: {exception_id}")
# raise RuntimeError(f"exception uploading {i + 1}th data") from e
def _delete_data(clients: Clients, suite: Testsuite):
with click.progressbar(length=len(suite.dataset),
label="delete ",
show_eta=False,
show_pos=True,
width=24) as bar:
for data in suite.dataset:
# noinspection PyBroadException
try:
table_name = data.config['table_name'] if data.config.get(
'table_name', None) is not None else data.config.get('name')
bar.item_show_func = \
lambda x: f"delete table: name={table_name}, namespace={data.config['namespace']}"
clients[data.role_str].delete_data(data)
except Exception:
LOGGER.exception(
f"delete failed: name={table_name}, namespace={data.config['namespace']}")
time.sleep(0.5)
bar.update(1)
echo.stdout_newline()
def _load_module_from_script(script_path):
module_name = str(script_path).split("/", -1)[-1].split(".")[0]
loader = importlib.machinery.SourceFileLoader(module_name, str(script_path))
spec = importlib.util.spec_from_loader(loader.name, loader)
mod = importlib.util.module_from_spec(spec)
loader.exec_module(mod)
return mod
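# Illustrative usage (the path is hypothetical): a pipeline job in a testsuite references
# a script, which is loaded here as a standalone module and driven via its `main`:
#   mod = _load_module_from_script("examples/pipeline/pipeline-upload.py")
#   mod.main(config=config, namespace=f"_{namespace}")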
def _set_namespace(data_namespace_mangling, namespace):
Path(f"logs/{namespace}").mkdir(exist_ok=True, parents=True)
set_logger(f"logs/{namespace}/exception.log")
echo.set_file(click.open_file(f'logs/{namespace}/stdout', "a"))
if data_namespace_mangling:
echo.echo(f"add data_namespace_mangling: _{namespace}")
DATA_JSON_HOOK.add_extend_namespace_hook(namespace)
CONF_JSON_HOOK.add_extend_namespace_hook(namespace)
def _add_replace_hook(replace):
DATA_JSON_HOOK.add_replace_hook(replace)
CONF_JSON_HOOK.add_replace_hook(replace)
DSL_JSON_HOOK.add_replace_hook(replace)
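# Illustrative note: the replace mapping originates from the suite command's
# `-r/--replace` option, e.g. `-r '{"epochs": 1}'` (hypothetical key); every
# data/conf/dsl json loaded for the run is patched through these hooks.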
| 7,792 | 40.452128 | 119 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/scripts/testsuite_cli.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import uuid
from datetime import timedelta
import click
from fate_test import _config
from fate_test._client import Clients
from fate_test._config import Config
from fate_test._flow_client import JobProgress, SubmitJobResponse, QueryJobResponse
from fate_test._io import LOGGER, echo
from fate_test._parser import JSON_STRING, Testsuite, non_success_summary
from fate_test.scripts._options import SharedOptions
from fate_test.scripts._utils import _load_testsuites, _upload_data, _delete_data, _load_module_from_script, \
_add_replace_hook
@click.command("suite")
@click.option('-i', '--include', required=True, type=click.Path(exists=True), multiple=True, metavar="<include>",
help="include *testsuite.json under these paths")
@click.option('-e', '--exclude', type=click.Path(exists=True), multiple=True,
help="exclude *testsuite.json under these paths")
@click.option('-r', '--replace', default="{}", type=JSON_STRING,
help="a json string represents mapping for replacing fields in data/conf/dsl")
@click.option("-g", '--glob', type=str,
help="glob string to filter sub-directory of path specified by <include>")
@click.option('-m', '--timeout', type=int, default=3600, help="maximum running time of job")
@click.option('-p', '--task-cores', type=int, help="processors per node")
@click.option('-uj', '--update-job-parameters', default="{}", type=JSON_STRING,
help="a json string represents mapping for replacing fields in conf.job_parameters")
@click.option('-uc', '--update-component-parameters', default="{}", type=JSON_STRING,
help="a json string represents mapping for replacing fields in conf.component_parameters")
@click.option("--skip-dsl-jobs", is_flag=True, default=False,
help="skip dsl jobs defined in testsuite")
@click.option("--skip-pipeline-jobs", is_flag=True, default=False,
help="skip pipeline jobs defined in testsuite")
@click.option("--skip-data", is_flag=True, default=False,
help="skip uploading data specified in testsuite")
@click.option("--data-only", is_flag=True, default=False,
help="upload data only")
@click.option("--provider", type=str,
help="Select the fate version, for example: [email protected]")
@click.option("--disable-clean-data", "clean_data", flag_value=False, default=None)
@click.option("--enable-clean-data", "clean_data", flag_value=True, default=None)
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def run_suite(ctx, replace, include, exclude, glob, timeout, update_job_parameters, update_component_parameters,
skip_dsl_jobs, skip_pipeline_jobs, skip_data, data_only, clean_data, task_cores, provider, **kwargs):
"""
process testsuite
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
config_inst = ctx.obj["config"]
if ctx.obj["extend_sid"] is not None:
config_inst.extend_sid = ctx.obj["extend_sid"]
if ctx.obj["auto_increasing_sid"] is not None:
config_inst.auto_increasing_sid = ctx.obj["auto_increasing_sid"]
if clean_data is None:
clean_data = config_inst.clean_data
namespace = ctx.obj["namespace"]
yes = ctx.obj["yes"]
data_namespace_mangling = ctx.obj["namespace_mangling"]
# prepare output dir and json hooks
_add_replace_hook(replace)
echo.welcome()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
echo.echo("loading testsuites:")
suites = _load_testsuites(includes=include, excludes=exclude, glob=glob, provider=provider)
for suite in suites:
_config.jobs_num += len(suite.jobs)
echo.echo(f"\tdataset({len(suite.dataset)}) dsl jobs({len(suite.jobs)}) "
f"pipeline jobs ({len(suite.pipeline_jobs)}) {suite.path}")
if not yes and not click.confirm("running?"):
return
echo.stdout_newline()
with Clients(config_inst) as client:
for i, suite in enumerate(suites):
# noinspection PyBroadException
try:
start = time.time()
echo.echo(f"[{i + 1}/{len(suites)}]start at {time.strftime('%Y-%m-%d %X')} {suite.path}", fg='red')
if not skip_data:
try:
_upload_data(client, suite, config_inst)
except Exception as e:
raise RuntimeError(f"exception occur while uploading data for {suite.path}") from e
if data_only:
continue
if not skip_dsl_jobs:
echo.stdout_newline()
try:
time_consuming = _submit_job(client, suite, namespace, config_inst, timeout,
update_job_parameters, update_component_parameters, task_cores)
except Exception as e:
raise RuntimeError(f"exception occur while submit job for {suite.path}") from e
if not skip_pipeline_jobs:
try:
_run_pipeline_jobs(config_inst, suite, namespace, data_namespace_mangling)
except Exception as e:
raise RuntimeError(f"exception occur while running pipeline jobs for {suite.path}") from e
if not skip_data and clean_data:
_delete_data(client, suite)
echo.echo(f"[{i + 1}/{len(suites)}]elapse {timedelta(seconds=int(time.time() - start))}", fg='red')
if not skip_dsl_jobs or not skip_pipeline_jobs:
suite_file = str(suite.path).split("/")[-1]
echo.echo(suite.pretty_final_summary(time_consuming, suite_file))
except Exception:
exception_id = uuid.uuid1()
echo.echo(f"exception in {suite.path}, exception_id={exception_id}")
LOGGER.exception(f"exception id: {exception_id}")
finally:
echo.stdout_newline()
non_success_summary()
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
def _submit_job(clients: Clients, suite: Testsuite, namespace: str, config: Config, timeout, update_job_parameters,
update_component_parameters, task_cores):
# submit jobs
with click.progressbar(length=len(suite.jobs),
label="jobs ",
show_eta=False,
show_pos=True,
width=24) as bar:
time_list = []
for job in suite.jobs_iter():
job_progress = JobProgress(job.job_name)
start = time.time()
_config.jobs_progress += 1
def _raise():
exception_id = str(uuid.uuid1())
job_progress.exception(exception_id)
suite.update_status(job_name=job.job_name, exception_id=exception_id)
echo.file(f"exception({exception_id})")
LOGGER.exception(f"exception id: {exception_id}")
# noinspection PyBroadException
try:
if task_cores is not None:
job.job_conf.update_job_common_parameters(task_cores=task_cores)
job.job_conf.update(config.parties, timeout, update_job_parameters,
update_component_parameters)
except Exception:
_raise()
continue
def update_bar(n_step):
bar.item_show_func = lambda x: job_progress.show()
time.sleep(0.1)
bar.update(n_step)
update_bar(1)
def _call_back(resp: SubmitJobResponse):
if isinstance(resp, SubmitJobResponse):
progress_tracking = "/".join([str(_config.jobs_progress), str(_config.jobs_num)])
if _config.jobs_num != len(suite.jobs):
job_progress.set_progress_tracking(progress_tracking)
job_progress.submitted(resp.job_id)
echo.file(f"[jobs] {resp.job_id} ", nl=False)
suite.update_status(job_name=job.job_name, job_id=resp.job_id)
if isinstance(resp, QueryJobResponse):
job_progress.running(resp.status, resp.progress)
update_bar(0)
# noinspection PyBroadException
try:
response = clients["guest_0"].submit_job(job=job, callback=_call_back)
# noinspection PyBroadException
try:
# add notes
notes = f"{job.job_name}@{suite.path}@{namespace}"
for role, party_id_list in job.job_conf.role.items():
for i, party_id in enumerate(party_id_list):
clients[f"{role}_{i}"].add_notes(job_id=response.job_id, role=role, party_id=party_id,
notes=notes)
except Exception:
pass
except Exception:
_raise()
else:
job_progress.final(response.status)
job_name = job.job_name
suite.update_status(job_name=job_name, status=response.status.status)
if suite.model_in_dep(job_name):
_config.jobs_progress += 1
if not response.status.is_success():
suite.remove_dependency(job_name)
else:
dependent_jobs = suite.get_dependent_jobs(job_name)
for predict_job in dependent_jobs:
model_info, table_info, cache_info, model_loader_info = None, None, None, None
deps_data = _config.deps_alter[predict_job.job_name]
if 'data_deps' in deps_data.keys() and deps_data.get('data', None) is not None and\
job_name == deps_data.get('data_deps', None).get('name', None):
for k, v in deps_data.get('data'):
if job_name == k:
data_pre = v
roles = list(data_pre.keys())
table_info, hierarchy = [], []
for role_ in roles:
role, index = role_.split("_")
input_ = data_pre[role_]
for data_input, cpn in input_.items():
try:
table_name = clients["guest_0"].output_data_table(
job_id=response.job_id,
role=role,
party_id=config.role[role][int(index)],
component_name=cpn)
except Exception:
_raise()
if predict_job.job_conf.dsl_version == 2:
hierarchy.append([role, index, data_input])
table_info.append({'table': table_name})
else:
hierarchy.append([role, 'args', 'data'])
table_info.append({data_input: [table_name]})
table_info = {'hierarchy': hierarchy, 'table_info': table_info}
if 'model_deps' in deps_data.keys() and \
job_name == deps_data.get('model_deps', None).get('name', None):
if predict_job.job_conf.dsl_version == 2:
# noinspection PyBroadException
try:
model_info = clients["guest_0"].deploy_model(
model_id=response.model_info["model_id"],
model_version=response.model_info["model_version"],
dsl=predict_job.job_dsl.as_dict())
except Exception:
_raise()
else:
model_info = response.model_info
if 'cache_deps' in deps_data.keys() and \
job_name == deps_data.get('cache_deps', None).get('name', None):
cache_dsl = predict_job.job_dsl.as_dict()
cache_info = []
for cpn in cache_dsl.get("components").keys():
if "CacheLoader" in cache_dsl.get("components").get(cpn).get("module"):
cache_info.append({cpn: {'job_id': response.job_id}})
cache_info = {'hierarchy': [""], 'cache_info': cache_info}
if 'model_loader_deps' in deps_data.keys() and \
job_name == deps_data.get('model_loader_deps', None).get('name', None):
model_loader_dsl = predict_job.job_dsl.as_dict()
model_loader_info = []
for cpn in model_loader_dsl.get("components").keys():
if "ModelLoader" in model_loader_dsl.get("components").get(cpn).get("module"):
model_loader_info.append({cpn: response.model_info})
model_loader_info = {'hierarchy': [""], 'model_loader_info': model_loader_info}
suite.feed_dep_info(predict_job, job_name, model_info=model_info, table_info=table_info,
cache_info=cache_info, model_loader_info=model_loader_info)
suite.remove_dependency(job_name)
update_bar(0)
echo.stdout_newline()
time_list.append(time.time() - start)
return [str(int(i)) + "s" for i in time_list]
def _run_pipeline_jobs(config: Config, suite: Testsuite, namespace: str, data_namespace_mangling: bool):
# pipeline demo goes here
job_n = len(suite.pipeline_jobs)
for i, pipeline_job in enumerate(suite.pipeline_jobs):
echo.echo(f"Running [{i + 1}/{job_n}] job: {pipeline_job.job_name}")
def _raise(err_msg, status="failed"):
exception_id = str(uuid.uuid1())
suite.update_status(job_name=job_name, exception_id=exception_id, status=status)
echo.file(f"exception({exception_id}), error message:\n{err_msg}")
# LOGGER.exception(f"exception id: {exception_id}")
job_name, script_path = pipeline_job.job_name, pipeline_job.script_path
mod = _load_module_from_script(script_path)
try:
if data_namespace_mangling:
try:
mod.main(config=config, namespace=f"_{namespace}")
suite.update_status(job_name=job_name, status="success")
except Exception as e:
_raise(e)
continue
else:
try:
mod.main(config=config)
suite.update_status(job_name=job_name, status="success")
except Exception as e:
_raise(e)
continue
except Exception as e:
_raise(e, status="not submitted")
continue
| 16,647 | 51.188088 | 116 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/scripts/cli.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
from fate_test.scripts._options import SharedOptions
from fate_test.scripts.benchmark_cli import run_benchmark
from fate_test.scripts.config_cli import config_group
from fate_test.scripts.data_cli import data_group
from fate_test.scripts.flow_test_cli import flow_group
from fate_test.scripts.performance_cli import run_task
from fate_test.scripts.pipeline_conversion_cli import convert_group
from fate_test.scripts.quick_test_cli import unittest_group
from fate_test.scripts.secure_protocol_cli import secure_protocol_group
from fate_test.scripts.testsuite_cli import run_suite
commands = {
"config": config_group,
"suite": run_suite,
"performance": run_task,
"benchmark-quality": run_benchmark,
"data": data_group,
"flow-test": flow_group,
"unittest": unittest_group,
"convert": convert_group,
"op-test": secure_protocol_group,
}
commands_alias = {
"bq": "benchmark-quality",
"bp": "performance"
}
class MultiCLI(click.MultiCommand):
def list_commands(self, ctx):
return list(commands)
def get_command(self, ctx, name):
if name not in commands and name in commands_alias:
name = commands_alias[name]
if name not in commands:
ctx.fail("No such command '{}'.".format(name))
return commands[name]
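# Alias resolution example (illustrative): `fate_test bq ...` dispatches to the
# `benchmark-quality` command and `fate_test bp ...` to `performance`.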
@click.command(cls=MultiCLI, help="A collection of useful tools for running FATE's tests.",
context_settings=dict(help_option_names=["-h", "--help"]))
@SharedOptions.get_shared_options()
@click.pass_context
def cli(ctx, **kwargs):
ctx.ensure_object(SharedOptions)
ctx.obj.update(**kwargs)
if __name__ == '__main__':
cli(obj=SharedOptions())
| 2,312 | 30.684932 | 89 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/scripts/config_cli.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pathlib import Path
import click
from fate_test._client import Clients
from fate_test._config import create_config, default_config, parse_config
from fate_test.scripts._options import SharedOptions
@click.group("config", help="fate_test config")
def config_group():
"""
config fate_test
"""
pass
@config_group.command(name="new")
def _new():
"""
create new fate_test config template
"""
create_config(Path("fate_test_config.yaml"))
click.echo(f"create config file: fate_test_config.yaml")
@config_group.command(name="edit")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def _edit(ctx, **kwargs):
"""
edit fate_test config file
"""
ctx.obj.update(**kwargs)
config = ctx.obj.get("config")
click.edit(filename=config)
@config_group.command(name="show")
def _show():
"""
show fate_test default config path
"""
click.echo(f"default config path is {default_config()}")
@config_group.command(name="check")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def _config(ctx, **kwargs):
"""
check connection
"""
ctx.obj.update(**kwargs)
config_inst = parse_config(ctx.obj.get("config"))
with Clients(config_inst) as clients:
roles = clients.all_roles()
for r in roles:
try:
version, address = clients[r].check_connection()
except Exception as e:
click.echo(f"[X]connection fail, role is {r}, exception is {e.args}")
else:
click.echo(f"[✓]connection {address} ok, fate version is {version}, role is {r}")
| 2,266 | 27.3375 | 97 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/scripts/pipeline_conversion_cli.py
|
import copy
import os
import shutil
import sys
import time
import uuid
import json
import click
import importlib
from fate_test._config import Config
from fate_test._io import LOGGER, echo
from fate_test.scripts._options import SharedOptions
@click.group(name="convert")
def convert_group():
"""
Converting pipeline files to dsl v2
"""
...
@convert_group.command("pipeline-to-dsl")
@click.option('-i', '--include', required=True, type=click.Path(exists=True), multiple=True, metavar="<include>",
help="include *pipeline.py under these paths")
@click.option('-o', '--output-path', type=click.Path(exists=True), help="DSL output path, default to *pipeline.py path")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def to_dsl(ctx, include, output_path, **kwargs):
"""
This command will run the pipeline; make sure data is uploaded
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
yes = ctx.obj["yes"]
echo.welcome()
echo.echo(f"converting namespace: {namespace}", fg='red')
for path in include:
echo.echo(f"pipeline path: {os.path.abspath(path)}")
if not yes and not click.confirm("running?"):
return
config_yaml_file = './examples/config.yaml'
temp_file_path = f'./logs/{namespace}/temp_pipeline.py'
for i in include:
try:
convert(i, temp_file_path, config_yaml_file, output_path, config_inst)
except Exception:
exception_id = uuid.uuid1()
echo.echo(f"exception_id={exception_id}")
LOGGER.exception(f"exception id: {exception_id}")
finally:
echo.stdout_newline()
echo.farewell()
echo.echo(f"converting namespace: {namespace}", fg='red')
@convert_group.command("pipeline-testsuite-to-dsl-testsuite")
@click.option('-i', '--include', required=True, type=click.Path(exists=True), metavar="<include>",
help="include is the pipeline test folder containing *testsuite.py")
@click.option('-t', '--template-path', required=False, type=click.Path(exists=True), metavar="<include>",
help="specify the test template to use")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def to_testsuite(ctx, include, template_path, **kwargs):
"""
convert pipeline testsuite to dsl testsuite
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
yes = ctx.obj["yes"]
echo.welcome()
if not os.path.isdir(include):
raise Exception("Please fill in a folder.")
echo.echo(f"testsuite namespace: {namespace}", fg='red')
echo.echo(f"pipeline path: {os.path.abspath(include)}")
if not yes and not click.confirm("running?"):
return
input_path = os.path.abspath(include)
input_list = [input_path]
i = 0
while i < len(input_list):
dirs = os.listdir(input_list[i])
for d in dirs:
if os.path.isdir(os.path.join(input_list[i], d)):
input_list.append(os.path.join(input_list[i], d))
i += 1
for file_path in input_list:
try:
module_name = os.path.basename(file_path)
do_generated(file_path, module_name, template_path, config_inst)
except Exception:
exception_id = uuid.uuid1()
echo.echo(f"exception_id={exception_id}")
LOGGER.exception(f"exception id: {exception_id}")
finally:
echo.stdout_newline()
echo.farewell()
echo.echo(f"converting namespace: {namespace}", fg='red')
def make_temp_pipeline(pipeline_file, temp_file_path, folder_name):
def _conf_file_update(_line, k, end, conf_file=None):
if ")" in _line[0]:
if conf_file is None:
conf_file = os.path.abspath(folder_name + "/" + _line[0].replace("'", "").replace('"', "").
replace(")", "").replace(":", "").replace("\n", ""))
_line = k + conf_file + end
else:
if conf_file is None:
conf_file = os.path.abspath(folder_name + "/" + _line[0].replace('"', ""))
_line = k + conf_file + '",' + _line[-1]
return conf_file, _line
def _get_conf_file(_lines):
param_default = False
conf_file = None
for _line in _lines:
if "--param" in _line or param_default:
if "default" in _line:
_line_start = _line.split("default=")
_line_end = _line_start[1].split(",")
conf_file, _ = _conf_file_update(_line_end, 'default="', '")')
param_default = False
else:
param_default = True
return conf_file
code_list = []
with open(pipeline_file, 'r') as f:
lines = f.readlines()
start_main = False
has_returned = False
space_num = 0
conf_file_dir = _get_conf_file(lines)
for line in lines:
if line is None:
continue
elif "def main" in line:
for char in line:
if char.isspace():
space_num += 1
else:
break
start_main = True
if "param=" in line:
line_start = line.split("param=")
line_end = line_start[1].split(",")
conf_file_dir, line = _conf_file_update(line_end, 'param="', '")', conf_file_dir)
line = line_start[0] + line
elif start_main and "def " in line and not has_returned:
code_list.append(" " * (space_num + 4) + "return pipeline\n")
start_main = False
elif start_main and "return " in line:
code_list.append(" " * (space_num + 4) + "return pipeline\n")
start_main = False
continue
elif start_main and 'if __name__ ==' in line:
code_list.append(" " * (space_num + 4) + "return pipeline\n")
start_main = False
code_list.append(line)
if start_main:
code_list.append(" " * (space_num + 4) + "return pipeline\n")
with open(temp_file_path, 'w') as f:
f.writelines(code_list)
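# Sketch of the rewrite above: the copied script keeps the original main(), but
# `--param`/`param=` defaults are rewritten to absolute config paths and a
# `return pipeline` replaces (or follows) main()'s exit points, so that convert()
# below can import the temp module, call main(), and dump the resulting conf/dsl.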
def convert(pipeline_file, temp_file_path, config_yaml_file, output_path, config: Config):
folder_name, file_name = os.path.split(pipeline_file)
if output_path is not None:
folder_name = output_path
echo.echo(f"folder_name: {os.path.abspath(folder_name)}, file_name: {file_name}")
conf_name = file_name.replace('.py', '_conf.json')
dsl_name = file_name.replace('.py', '_dsl.json')
conf_name = os.path.join(folder_name, conf_name)
dsl_name = os.path.join(folder_name, dsl_name)
make_temp_pipeline(pipeline_file, temp_file_path, folder_name)
additional_path = os.path.realpath(os.path.join(os.path.curdir, pipeline_file, os.pardir, os.pardir))
if additional_path not in sys.path:
sys.path.append(additional_path)
loader = importlib.machinery.SourceFileLoader("main", str(temp_file_path))
spec = importlib.util.spec_from_loader(loader.name, loader)
mod = importlib.util.module_from_spec(spec)
loader.exec_module(mod)
my_pipeline = mod.main(os.path.join(config.data_base_dir, config_yaml_file))
conf = my_pipeline.get_train_conf()
dsl = my_pipeline.get_train_dsl()
os.remove(temp_file_path)
with open(conf_name, 'w') as f:
json.dump(conf, f, indent=4)
echo.echo('conf name is {}'.format(os.path.abspath(conf_name)))
with open(dsl_name, 'w') as f:
json.dump(dsl, f, indent=4)
echo.echo('dsl name is {}'.format(os.path.abspath(dsl_name)))
def insert_extract_code(file_path):
code_lines = []
code = \
"""
import json
import os
def extract(my_pipeline, file_name, output_path='dsl_testsuite'):
out_name = file_name.split('/')[-1]
out_name = out_name.replace('pipeline-', '').replace('.py', '').replace('-', '_')
conf = my_pipeline.get_train_conf()
dsl = my_pipeline.get_train_dsl()
cur_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
conf_name = os.path.join(cur_dir, output_path, f"{out_name}_conf.json")
dsl_name = os.path.join(cur_dir, output_path, f"{out_name}_dsl.json")
json.dump(conf, open(conf_name, 'w'), indent=4)
json.dump(dsl, open(dsl_name, 'w'), indent=4)
"""
code_lines.append(code)
screen_keywords = [".predict(", ".fit(", ".deploy_component(", "predict_pipeline ",
"predict_pipeline."]
continue_to_screen = False
has_return = False
with open(file_path, 'r') as f:
lines = f.readlines()
for l in lines:
if ".predict(" in l or ".fit(" in l:
code_lines.append(f"# {l}")
elif 'if __name__ == "__main__":' in l:
if not has_return:
code_lines.append(" extract(pipeline, __file__)\n")
code_lines.append(l)
elif 'return' in l:
code_lines.append(" extract(pipeline, __file__)\n")
# code_lines.append(l)
has_return = True
elif "get_summary()" in l:
continue
elif continue_to_screen:
code_lines.append(f"# {l}")
if ")" in l:
continue_to_screen = False
else:
should_append = True
for key_word in screen_keywords:
if key_word in l:
code_lines.append(f"# {l}")
should_append = False
if ")" not in l:
continue_to_screen = True
if should_append:
code_lines.append(l)
return code_lines
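# Effect of the rewrite above (illustrative): in the copied script, lines such as
#   pipeline.fit(...)              ->  # pipeline.fit(...)
#   predict_pipeline.predict(...)  ->  # predict_pipeline.predict(...)
# are commented out and `extract(pipeline, __file__)` is inserted before the script
# returns (or reaches `if __name__ == "__main__":`), so executing the copy only
# dumps *_conf.json / *_dsl.json into dsl_testsuite/ instead of submitting jobs.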
def get_testsuite_file(testsuite_file_path):
echo.echo(f"testsuite_file_path: {testsuite_file_path}")
with open(testsuite_file_path, 'r', encoding='utf-8') as load_f:
testsuite_json = json.load(load_f)
if "tasks" in testsuite_json:
del testsuite_json["tasks"]
if "pipeline_tasks" in testsuite_json:
del testsuite_json["pipeline_tasks"]
return testsuite_json
def do_generated(file_path, fold_name, template_path, config: Config):
yaml_file = os.path.join(config.data_base_dir, "./examples/config.yaml")
PYTHONPATH = os.environ.get('PYTHONPATH', '') + ":" + str(config.data_base_dir)
os.environ['PYTHONPATH'] = PYTHONPATH
if not os.path.isdir(file_path):
return
files = os.listdir(file_path)
if template_path is None:
for f in files:
if "testsuite" in f and "generated_testsuite" not in f:
template_path = os.path.join(file_path, f)
break
if template_path is None:
return
suite_json = get_testsuite_file(template_path)
pipeline_suite = copy.deepcopy(suite_json)
suite_json["tasks"] = {}
pipeline_suite["pipeline_tasks"] = {}
replaced_path = os.path.join(file_path, 'replaced_code')
generated_path = os.path.join(file_path, 'dsl_testsuite')
if not os.path.exists(replaced_path):
os.makedirs(replaced_path, exist_ok=True)
if not os.path.exists(generated_path):
os.makedirs(generated_path, exist_ok=True)
for f in files:
if not f.startswith("pipeline"):
continue
echo.echo(f)
task_name = f.replace(".py", "")
task_name = "-".join(task_name.split('-')[1:])
pipeline_suite["pipeline_tasks"][task_name] = {
"script": f
}
f_path = os.path.join(file_path, f)
code_str = insert_extract_code(f_path)
pipeline_file_path = os.path.join(replaced_path, f)
with open(pipeline_file_path, 'w') as fw:
fw.writelines(code_str)
exe_files = os.listdir(replaced_path)
fail_job_count = 0
task_type_list = []
exe_conf_file = None
exe_dsl_file = None
for i, f in enumerate(exe_files):
abs_file = os.path.join(replaced_path, f)
echo.echo('\n' + '[{}/{}] executing {}'.format(i + 1, len(exe_files), abs_file), fg='red')
result = os.system(f"python {abs_file} -config {yaml_file}")
if not result:
time.sleep(3)
conf_files = os.listdir(generated_path)
f_dsl = {"_".join(f.split('_')[:-1]): f for f in conf_files if 'dsl.json' in f}
f_conf = {"_".join(f.split('_')[:-1]): f for f in conf_files if 'conf.json' in f}
for task_type, dsl_file in f_dsl.items():
if task_type not in task_type_list:
exe_dsl_file = dsl_file
task_type_list.append(task_type)
exe_conf_file = f_conf[task_type]
suite_json['tasks'][task_type] = {
"conf": exe_conf_file,
"dsl": exe_dsl_file
}
echo.echo('conf name is {}'.format(os.path.join(file_path, "dsl_testsuite", exe_conf_file)))
echo.echo('dsl name is {}'.format(os.path.join(file_path, "dsl_testsuite", exe_dsl_file)))
else:
echo.echo('profile generation failed')
fail_job_count += 1
suite_path = os.path.join(generated_path, f"{fold_name}_testsuite.json")
with open(suite_path, 'w', encoding='utf-8') as json_file:
json.dump(suite_json, json_file, ensure_ascii=False, indent=4)
suite_path = os.path.join(file_path, f"{fold_name}_pipeline_testsuite.json")
with open(suite_path, 'w', encoding='utf-8') as json_file:
json.dump(pipeline_suite, json_file, ensure_ascii=False, indent=4)
shutil.rmtree(replaced_path)
if not fail_job_count:
echo.echo("Generate testsuite and dsl&conf finished!")
else:
echo.echo("Generate testsuite and dsl&conf finished! {} failures".format(fail_job_count))
| 14,075 | 37.883978 | 120 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/scripts/flow_test_cli.py
|
import os
import time
import uuid
import click
from datetime import timedelta
from pathlib import Path
from ruamel import yaml
from fate_test._config import Config
from fate_test._io import LOGGER, echo
from fate_test.scripts._options import SharedOptions
from fate_test.flow_test import flow_rest_api, flow_sdk_api, flow_cli_api, flow_process
@click.group(name="flow-test")
def flow_group():
"""
flow test
"""
...
@flow_group.command("process")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def process(ctx, **kwargs):
"""
flow process test
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
yes = ctx.obj["yes"]
echo.welcome()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
if not yes and not click.confirm("running?"):
return
try:
start = time.time()
flow_process.run_fate_flow_test(get_role(conf=config_inst))
echo.echo(f"elapse {timedelta(seconds=int(time.time() - start))}", fg='red')
except Exception:
exception_id = uuid.uuid1()
echo.echo(f"exception_id={exception_id}")
LOGGER.exception(f"exception id: {exception_id}")
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
@flow_group.command("rest")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def rest_api(ctx, **kwargs):
"""
flow rest api test
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
yes = ctx.obj["yes"]
echo.welcome()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
if not yes and not click.confirm("running?"):
return
try:
start = time.time()
flow_rest_api.run_test_api(get_role(conf=config_inst), namespace)
echo.echo(f"elapse {timedelta(seconds=int(time.time() - start))}", fg='red')
except Exception:
exception_id = uuid.uuid1()
echo.echo(f"exception_id={exception_id}")
LOGGER.exception(f"exception id: {exception_id}")
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
@flow_group.command("sdk")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def sdk_api(ctx, **kwargs):
"""
flow sdk api test
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
yes = ctx.obj["yes"]
echo.welcome()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
if not yes and not click.confirm("running?"):
return
try:
start = time.time()
flow_sdk_api.run_test_api(get_role(conf=config_inst), namespace)
echo.echo(f"elapse {timedelta(seconds=int(time.time() - start))}", fg='red')
except Exception:
exception_id = uuid.uuid1()
echo.echo(f"exception_id={exception_id}")
LOGGER.exception(f"exception id: {exception_id}")
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
@flow_group.command("cli")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def cli_api(ctx, **kwargs):
"""
flow cli api test
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
yes = ctx.obj["yes"]
echo.welcome()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
if not yes and not click.confirm("running?"):
return
try:
start = time.time()
flow_cli_api.run_test_api(get_role(conf=config_inst), namespace)
echo.echo(f"elapse {timedelta(seconds=int(time.time() - start))}", fg='red')
except Exception:
exception_id = uuid.uuid1()
echo.echo(f"exception_id={exception_id}")
LOGGER.exception(f"exception id: {exception_id}")
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
def get_role(conf: Config):
flow_services = conf.serving_setting['flow_services'][0]['address']
path = conf.flow_test_config_dir
if isinstance(path, str):
path = Path(path)
config = {}
if path is not None:
with path.open("r") as f:
config.update(yaml.safe_load(f))
flow_test_template = config['flow_test_template']
config_json = {'guest_party_id': conf.role['guest'],
'host_party_id': [conf.role['host'][0]],
'arbiter_party_id': conf.role['arbiter'],
'online_serving': conf.serving_setting['serving_setting']['address'],
'train_conf_path': os.path.abspath(conf.data_base_dir) + flow_test_template['train_conf_path'],
'train_dsl_path': os.path.abspath(conf.data_base_dir) + flow_test_template['train_dsl_path'],
'predict_conf_path': os.path.abspath(conf.data_base_dir) + flow_test_template['predict_conf_path'],
'predict_dsl_path': os.path.abspath(conf.data_base_dir) + flow_test_template['predict_dsl_path'],
'upload_file_path': os.path.abspath(conf.data_base_dir) + flow_test_template['upload_conf_path'],
'model_file_path': os.path.abspath(conf.data_base_dir) + flow_test_template['model_conf_path'],
'server_url': "http://{}/{}".format(flow_services, config['api_version']),
'train_auc': config['train_auc'],
'phone_num': config['phone_num'],
'component_name': config['component_name'],
'component_is_homo': config.get('component_is_homo', False),
'serving_setting': conf.serving_setting['serving_setting']['address'],
'metric_output_path': config['metric_output_path'],
'model_output_path': config['model_output_path'],
'cache_directory': conf.cache_directory,
'data_base_dir': conf.data_base_dir
}
if flow_test_template.get('homo_deploy_path'):
config_json['homo_deploy_path'] = os.path.abspath(conf.data_base_dir) + flow_test_template['homo_deploy_path']
if flow_test_template.get('homo_deploy_kube_config_path'):
config_json['homo_deploy_kube_config_path'] = os.path.abspath(conf.data_base_dir) + \
flow_test_template['homo_deploy_kube_config_path']
return config_json
| 6,498 | 35.105556 | 118 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/scripts/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/scripts/data_cli.py
|
import os
import re
import sys
import time
import uuid
import json
from datetime import timedelta
import click
from pathlib import Path
from ruamel import yaml
from fate_test import _config
from fate_test._config import Config
from fate_test._client import Clients
from fate_test._io import LOGGER, echo
from fate_test.scripts._options import SharedOptions
from fate_test.scripts._utils import _upload_data, _load_testsuites, _delete_data, _big_data_task
@click.group(name="data")
def data_group():
"""
upload or delete data in suite config files
"""
...
@data_group.command("upload")
@click.option('-i', '--include', required=False, type=click.Path(exists=True), multiple=True, metavar="<include>",
help="include *benchmark.json under these paths")
@click.option('-e', '--exclude', type=click.Path(exists=True), multiple=True,
help="exclude *benchmark.json under these paths")
@click.option("-t", "--config-type", type=click.Choice(["min_test", "all_examples"]), default="min_test",
help="config file")
@click.option('-g', '--glob', type=str,
help="glob string to filter sub-directory of path specified by <include>")
@click.option('-s', '--suite-type', required=False, type=click.Choice(["testsuite", "benchmark"]), default="testsuite",
help="suite type")
@click.option('-r', '--role', type=str, default='all', help="role to process, default to `all`. "
"use option likes: `guest_0`, `host_0`, `host`")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def upload(ctx, include, exclude, glob, suite_type, role, config_type, **kwargs):
"""
upload data defined in suite config files
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
if ctx.obj["extend_sid"] is not None:
config_inst.extend_sid = ctx.obj["extend_sid"]
if ctx.obj["auto_increasing_sid"] is not None:
config_inst.auto_increasing_sid = ctx.obj["auto_increasing_sid"]
yes = ctx.obj["yes"]
echo.welcome()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
if len(include) != 0:
echo.echo("loading testsuites:")
suffix = "benchmark.json" if suite_type == "benchmark" else "testsuite.json"
suites = _load_testsuites(includes=include, excludes=exclude, glob=glob,
suffix=suffix, suite_type=suite_type)
for suite in suites:
if role != "all":
suite.dataset = [d for d in suite.dataset if re.match(d.role_str, role)]
echo.echo(f"\tdataset({len(suite.dataset)}) {suite.path}")
if not yes and not click.confirm("running?"):
return
client_upload(suites=suites, config_inst=config_inst, namespace=namespace)
else:
config = get_config(config_inst)
if config_type == 'min_test':
config_file = config.min_test_data_config
else:
config_file = config.all_examples_data_config
with open(config_file, 'r', encoding='utf-8') as f:
upload_data = json.loads(f.read())
echo.echo(f"\tdataset({len(upload_data['data'])}) {config_file}")
if not yes and not click.confirm("running?"):
return
with Clients(config_inst) as client:
data_upload(client, config_inst, upload_data)
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
@data_group.command("delete")
@click.option('-i', '--include', required=True, type=click.Path(exists=True), multiple=True, metavar="<include>",
help="include *benchmark.json under these paths")
@click.option('-e', '--exclude', type=click.Path(exists=True), multiple=True,
help="exclude *benchmark.json under these paths")
@click.option('-g', '--glob', type=str,
help="glob string to filter sub-directory of path specified by <include>")
@click.option('-s', '--suite-type', required=True, type=click.Choice(["testsuite", "benchmark"]), help="suite type")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def delete(ctx, include, exclude, glob, yes, suite_type, **kwargs):
"""
delete data defined in suite config files
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
echo.welcome()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
echo.echo("loading testsuites:")
suffix = "benchmark.json" if suite_type == "benchmark" else "testsuite.json"
suites = _load_testsuites(includes=include, excludes=exclude, glob=glob,
suffix=suffix, suite_type=suite_type)
if not yes and not click.confirm("running?"):
return
for suite in suites:
echo.echo(f"\tdataset({len(suite.dataset)}) {suite.path}")
if not yes and not click.confirm("running?"):
return
with Clients(config_inst) as client:
for i, suite in enumerate(suites):
_delete_data(client, suite)
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
@data_group.command("generate")
@click.option('-i', '--include', required=True, type=click.Path(exists=True), multiple=True, metavar="<include>",
help="include *testsuite.json / *benchmark.json under these paths")
@click.option('-ht', '--host-data-type', default='tag_value', type=click.Choice(['dense', 'tag', 'tag_value']),
help="Select the format of the host data")
@click.option('-p', '--encryption-type', type=click.Choice(['sha256', 'md5']),
help="Entry ID encryption method for, sha256 and md5")
@click.option('-m', '--match-rate', default=1.0, type=float,
help="Intersection rate relative to guest, between [0, 1]")
@click.option('-s', '--sparsity', default=0.2, type=float,
help="The sparsity of tag data, The value is between (0-1)")
@click.option('-ng', '--guest-data-size', type=int, default=10000,
help="Set guest data set size, not less than 100")
@click.option('-nh', '--host-data-size', type=int,
help="Set host data set size, not less than 100")
@click.option('-fg', '--guest-feature-num', type=int, default=20,
help="Set guest feature dimensions")
@click.option('-fh', '--host-feature-num', type=int, default=200,
help="Set host feature dimensions; the default is equal to the number of guest's size")
@click.option('-o', '--output-path', type=click.Path(exists=True),
help="Customize the output path of generated data")
@click.option('--force', is_flag=True, default=False,
help="Overwrite existing file")
@click.option('--split-host', is_flag=True, default=False,
help="Divide the amount of host data equally among all the host tables in TestSuite")
@click.option('--upload-data', is_flag=True, default=False,
help="Generated data will be uploaded")
@click.option('--remove-data', is_flag=True, default=False,
help="The generated data will be deleted")
@click.option('--parallelize', is_flag=True, default=False,
help="It is directly used to upload data, and will not generate data")
@click.option('--use-local-data', is_flag=True, default=False,
help="The existing data of the server will be uploaded, This parameter is not recommended for "
"distributed applications")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def generate(ctx, include, host_data_type, encryption_type, match_rate, sparsity, guest_data_size,
host_data_size, guest_feature_num, host_feature_num, output_path, force, split_host, upload_data,
remove_data, use_local_data, parallelize, **kwargs):
"""
create data defined in suite config files
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
if ctx.obj["extend_sid"] is not None:
config_inst.extend_sid = ctx.obj["extend_sid"]
if ctx.obj["auto_increasing_sid"] is not None:
config_inst.auto_increasing_sid = ctx.obj["auto_increasing_sid"]
if parallelize and upload_data:
upload_data = False
yes = ctx.obj["yes"]
echo.welcome()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
echo.echo("loading testsuites:")
if host_data_size is None:
host_data_size = guest_data_size
suites = _load_testsuites(includes=include, excludes=tuple(), glob=None)
suites += _load_testsuites(includes=include, excludes=tuple(), glob=None,
suffix="benchmark.json", suite_type="benchmark")
for suite in suites:
if upload_data:
echo.echo(f"\tdataget({len(suite.dataset)}) dataset({len(suite.dataset)}) {suite.path}")
else:
echo.echo(f"\tdataget({len(suite.dataset)}) {suite.path}")
if not yes and not click.confirm("running?"):
return
_big_data_task(include, guest_data_size, host_data_size, guest_feature_num, host_feature_num, host_data_type,
config_inst, encryption_type, match_rate, sparsity, force, split_host, output_path, parallelize)
if upload_data:
if use_local_data:
_config.use_local_data = 0
_config.data_switch = remove_data
client_upload(suites=suites, config_inst=config_inst, namespace=namespace, output_path=output_path)
@data_group.command("download")
@click.option("-t", "--type", type=click.Choice(["mnist"]), default="mnist",
help="config file")
@click.option('-o', '--output-path', type=click.Path(exists=True),
help="output path of mnist data, the default path is examples/data")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def download_mnists(ctx, output_path, **kwargs):
"""
download mnist data for flow
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
yes = ctx.obj["yes"]
echo.welcome()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
if output_path is None:
config = get_config(config_inst)
output_path = str(config.data_base_dir) + "/examples/data/"
if not yes and not click.confirm("running?"):
return
try:
download_mnist(Path(output_path), "mnist_train")
download_mnist(Path(output_path), "mnist_eval", is_train=False)
except Exception:
exception_id = uuid.uuid1()
echo.echo(f"exception_id={exception_id}")
LOGGER.exception(f"exception id: {exception_id}")
finally:
echo.stdout_newline()
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
@data_group.command("query_schema")
@click.option('-cpn', '--component-name', required=False, type=str, help="component name", default='dataio_0')
@click.option('-j', '--job-id', required=True, type=str, help="job id")
@click.option('-r', '--role', required=True, type=click.Choice(["guest", "host", "arbiter"]), help="role")
@click.option('-p', '--party-id', required=True, type=str, help="party id")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def query_schema(ctx, component_name, job_id, role, party_id, **kwargs):
"""
query the meta of the output data of a component
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
yes = ctx.obj["yes"]
config_inst = ctx.obj["config"]
echo.welcome()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
if not yes and not click.confirm("running?"):
return
with Clients(config_inst) as client:
query_component_output_data(client, config_inst, component_name, job_id, role, party_id)
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
def get_config(conf: Config):
return conf
def query_component_output_data(clients: Clients, config: Config, component_name, job_id, role, party_id):
roles = config.role
clients_role = None
for k, v in roles.items():
if int(party_id) in v and k == role:
clients_role = role + "_" + str(v.index(int(party_id)))
try:
if clients_role is None:
raise ValueError(f"party id {party_id} does not exist")
try:
table_info = clients[clients_role].output_data_table(job_id=job_id, role=role, party_id=party_id,
component_name=component_name)
table_info = clients[clients_role].table_info(table_name=table_info['name'],
namespace=table_info['namespace'])
except Exception as e:
raise RuntimeError(f"An exception occurred while getting data {clients_role}<-{component_name}") from e
echo.echo("query_component_output_data result: {}".format(table_info))
try:
header = table_info['data']['schema']['header']
except (KeyError, ValueError) as e:
raise ValueError(f"Obtain header from table error, error msg: {e}")
result = []
for idx, header_name in enumerate(header[1:]):
result.append((idx, header_name))
echo.echo("Queried header is {}".format(result))
except Exception:
exception_id = uuid.uuid1()
echo.echo(f"exception_id={exception_id}")
LOGGER.exception(f"exception id: {exception_id}")
finally:
echo.stdout_newline()
def download_mnist(base, name, is_train=True):
import torchvision
dataset = torchvision.datasets.MNIST(
root=base.joinpath(".cache"), train=is_train, download=True
)
converted_path = base.joinpath(name)
converted_path.mkdir(exist_ok=True)
inputs_path = converted_path.joinpath("images")
inputs_path.mkdir(exist_ok=True)
targets_path = converted_path.joinpath("targets")
config_path = converted_path.joinpath("config.yaml")
filenames_path = converted_path.joinpath("filenames")
with filenames_path.open("w") as filenames:
with targets_path.open("w") as targets:
for idx, (img, target) in enumerate(dataset):
filename = f"{idx:05d}"
# save img
img.save(inputs_path.joinpath(f"{filename}.jpg"))
# save target
targets.write(f"{filename},{target}\n")
# save filenames
filenames.write(f"{filename}\n")
config = {
"type": "vision",
"inputs": {"type": "images", "ext": "jpg", "PIL_mode": "L"},
"targets": {"type": "integer"},
}
with config_path.open("w") as f:
yaml.safe_dump(config, f, indent=2, default_flow_style=False)
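# Resulting layout (illustrative), rooted at <output-path>/mnist_train or mnist_eval:
#   images/00000.jpg ...   one jpg per sample
#   targets                one "<filename>,<label>" line per sample
#   filenames              one filename per line
#   config.yaml            vision-type dataset description consumed by flow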
def client_upload(suites, config_inst, namespace, output_path=None):
with Clients(config_inst) as client:
for i, suite in enumerate(suites):
# noinspection PyBroadException
try:
echo.echo(f"[{i + 1}/{len(suites)}]start at {time.strftime('%Y-%m-%d %X')} {suite.path}", fg='red')
try:
_upload_data(client, suite, config_inst, output_path)
except Exception as e:
raise RuntimeError(f"exception occur while uploading data for {suite.path}") from e
except Exception:
exception_id = uuid.uuid1()
echo.echo(f"exception in {suite.path}, exception_id={exception_id}")
LOGGER.exception(f"exception id: {exception_id}")
finally:
echo.stdout_newline()
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
def data_upload(clients: Clients, conf: Config, upload_config):
def _await_finish(job_id, task_name=None):
deadline = time.time() + sys.maxsize
start = time.time()
param = dict(
job_id=job_id,
role=None
)
while True:
stdout = clients["guest_0"].flow_client("job/query", param)
status = stdout["data"][0]["f_status"]
elapse_seconds = int(time.time() - start)
date = time.strftime('%Y-%m-%d %X')
if task_name:
log_msg = f"[{date}][{task_name}]{status}, elapse: {timedelta(seconds=elapse_seconds)}"
else:
log_msg = f"[{date}]{job_id} {status}, elapse: {timedelta(seconds=elapse_seconds)}"
if (status == "running" or status == "waiting") and time.time() < deadline:
print(log_msg, end="\r")
time.sleep(1)
continue
else:
print(" " * 60, end="\r") # clean line
echo.echo(log_msg)
return status
task_data = upload_config["data"]
for i, data in enumerate(task_data):
format_msg = f"@{data['file']} >> {data['namespace']}.{data['table_name']}"
echo.echo(f"[{time.strftime('%Y-%m-%d %X')}]uploading {format_msg}")
try:
data["file"] = str(os.path.join(conf.data_base_dir, data["file"]))
param = dict(
file=data["file"],
head=data["head"],
partition=data["partition"],
table_name=data["table_name"],
namespace=data["namespace"]
)
stdout = clients["guest_0"].flow_client("data/upload", param, drop=1)
job_id = stdout.get('jobId', None)
echo.echo(f"[{time.strftime('%Y-%m-%d %X')}]upload done {format_msg}, job_id={job_id}\n")
if job_id is None:
echo.echo("table already exist. To upload again, Please add '-f 1' in start cmd")
continue
_await_finish(job_id)
param = dict(
table_name=data["table_name"],
namespace=data["namespace"]
)
stdout = clients["guest_0"].flow_client("table/info", param)
count = stdout["data"]["count"]
if count != data["count"]:
raise AssertionError("Count of upload file is not as expect, count is: {},"
"expect is: {}".format(count, data["count"]))
echo.echo(f"[{time.strftime('%Y-%m-%d %X')}] check_data_out {stdout} \n")
except Exception as e:
exception_id = uuid.uuid1()
echo.echo(f"exception in {data['file']}, exception_id={exception_id}")
LOGGER.exception(f"exception id: {exception_id}")
echo.echo(f"upload {i + 1}th data {data['table_name']} fail, exception_id: {exception_id}")
# raise RuntimeError(f"exception occur while uploading data for {data['file']}") from e
finally:
echo.stdout_newline()
| 19,039 | 43.176334 | 119 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/scripts/generate_mock_data.py
|
import hashlib
import json
import os
import random
import threading
import sys
import time
import uuid
import functools
import pandas as pd
import numpy as np
from fate_test._config import Config
from fate_test._io import echo, LOGGER
def import_fate():
from fate_arch import storage
from fate_flow.utils import data_utils
from fate_arch import session
from fate_arch.storage import StorageEngine
from fate_arch.common.conf_utils import get_base_config
from fate_arch.storage import EggRollStoreType
return storage, data_utils, session, StorageEngine, get_base_config, EggRollStoreType
storage, data_utils, session, StorageEngine, get_base_config, EggRollStoreType = import_fate()
sys.setrecursionlimit(1000000)
class data_progress:
def __init__(self, down_load, time_start):
self.time_start = time_start
self.down_load = down_load
self.time_percent = 0
self.switch = True
def set_switch(self, switch):
self.switch = switch
def get_switch(self):
return self.switch
def set_time_percent(self, time_percent):
self.time_percent = time_percent
def get_time_percent(self):
return self.time_percent
def progress(self, percent):
if percent > 100:
percent = 100
end = time.time()
if percent != 100:
print(f"\r{self.down_load} %.f%s [%s] running" % (percent, '%', self.timer(end - self.time_start)),
flush=True, end='')
else:
print(f"\r{self.down_load} %.f%s [%s] success" % (percent, '%', self.timer(end - self.time_start)),
flush=True, end='')
@staticmethod
def timer(times):
hours, rem = divmod(times, 3600)
minutes, seconds = divmod(rem, 60)
return "{:0>2}:{:0>2}:{:0>2}".format(int(hours), int(minutes), int(seconds))
def remove_file(path):
os.remove(path)
def id_encryption(encryption_type, start_num, end_num):
if encryption_type == 'md5':
return [hashlib.md5(bytes(str(value), encoding='utf-8')).hexdigest() for value in range(start_num, end_num)]
elif encryption_type == 'sha256':
return [hashlib.sha256(bytes(str(value), encoding='utf-8')).hexdigest() for value in range(start_num, end_num)]
else:
return [str(value) for value in range(start_num, end_num)]
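# Illustrative behaviour: id_encryption('md5', 0, 3) returns the md5 hex digests of
# "0", "1", "2"; an unrecognised encryption_type falls back to plain string ids.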
def get_big_data(guest_data_size, host_data_size, guest_feature_num, host_feature_num, include_path, host_data_type,
conf: Config, encryption_type, match_rate, sparsity, force, split_host, output_path, parallelize):
global big_data_dir
def list_tag_value(feature_nums, head):
# data = ''
# for f in range(feature_nums):
# data += head[f] + ':' + str(round(np.random.randn(), 4)) + ";"
# return data[:-1]
return ";".join([head[k] + ':' + str(round(v, 4)) for k, v in enumerate(np.random.randn(feature_nums))])
def list_tag(feature_nums, data_list):
data = ''
for f in range(feature_nums):
data += random.choice(data_list) + ";"
return data[:-1]
def _generate_tag_value_data(data_path, start_num, end_num, feature_nums, progress):
data_num = end_num - start_num
section_data_size = round(data_num / 100)
iteration = round(data_num / section_data_size)
head = ['x' + str(i) for i in range(feature_nums)]
for batch in range(iteration + 1):
progress.set_time_percent(batch)
output_data = pd.DataFrame(columns=["id"])
if section_data_size * (batch + 1) <= data_num:
output_data["id"] = id_encryption(encryption_type, section_data_size * batch + start_num,
section_data_size * (batch + 1) + start_num)
slicing_data_size = section_data_size
elif section_data_size * batch < data_num:
output_data['id'] = id_encryption(encryption_type, section_data_size * batch + start_num, end_num)
slicing_data_size = data_num - section_data_size * batch
else:
break
feature = [list_tag_value(feature_nums, head) for i in range(slicing_data_size)]
output_data['feature'] = feature
output_data.to_csv(data_path, mode='a+', index=False, header=False)
def _generate_dens_data(data_path, start_num, end_num, feature_nums, label_flag, progress):
if label_flag:
head_1 = ['id', 'y']
else:
head_1 = ['id']
data_num = end_num - start_num
head_2 = ['x' + str(i) for i in range(feature_nums)]
df_data_1 = pd.DataFrame(columns=head_1)
head_data = pd.DataFrame(columns=head_1 + head_2)
head_data.to_csv(data_path, mode='a+', index=False)
section_data_size = round(data_num / 100)
iteration = round(data_num / section_data_size)
for batch in range(iteration + 1):
progress.set_time_percent(batch)
if section_data_size * (batch + 1) <= data_num:
df_data_1["id"] = id_encryption(encryption_type, section_data_size * batch + start_num,
section_data_size * (batch + 1) + start_num)
slicing_data_size = section_data_size
elif section_data_size * batch < data_num:
df_data_1 = pd.DataFrame(columns=head_1)
df_data_1["id"] = id_encryption(encryption_type, section_data_size * batch + start_num, end_num)
slicing_data_size = data_num - section_data_size * batch
else:
break
if label_flag:
df_data_1["y"] = [round(np.random.random()) for x in range(slicing_data_size)]
feature = np.random.randint(-10000, 10000, size=[slicing_data_size, feature_nums]) / 10000
df_data_2 = pd.DataFrame(feature, columns=head_2)
output_data = pd.concat([df_data_1, df_data_2], axis=1)
output_data.to_csv(data_path, mode='a+', index=False, header=False)
def _generate_tag_data(data_path, start_num, end_num, feature_nums, sparsity, progress):
data_num = end_num - start_num
section_data_size = round(data_num / 100)
iteration = round(data_num / section_data_size)
valid_set = [x for x in range(2019120799, 2019120799 + round(feature_nums / sparsity))]
data = list(map(str, valid_set))
for batch in range(iteration + 1):
progress.set_time_percent(batch)
output_data = pd.DataFrame(columns=["id"])
if section_data_size * (batch + 1) <= data_num:
output_data["id"] = id_encryption(encryption_type, section_data_size * batch + start_num,
section_data_size * (batch + 1) + start_num)
slicing_data_size = section_data_size
elif section_data_size * batch < data_num:
output_data["id"] = id_encryption(encryption_type, section_data_size * batch + start_num, end_num)
slicing_data_size = data_num - section_data_size * batch
else:
break
feature = [list_tag(feature_nums, data_list=data) for i in range(slicing_data_size)]
output_data['feature'] = feature
output_data.to_csv(data_path, mode='a+', index=False, header=False)
def _generate_parallelize_data(start_num, end_num, feature_nums, table_name, namespace, label_flag, data_type,
partition, progress):
        def expand_id_range(k, v):
            # v is carried through the computing table as a string; convert it once
            feature_count = int(v)
            if label_flag:
                return [(id_encryption(encryption_type, ids, ids + 1)[0],
                         ",".join([str(round(np.random.random()))] +
                                  [str(round(i, 4)) for i in np.random.randn(feature_count)]))
                        for ids in range(int(k), min(step + int(k), end_num))]
            else:
                if data_type == 'tag':
                    valid_set = [x for x in range(2019120799, 2019120799 + round(feature_nums / sparsity))]
                    data = list(map(str, valid_set))
                    return [(id_encryption(encryption_type, ids, ids + 1)[0],
                             ";".join([random.choice(data) for i in range(feature_count)]))
                            for ids in range(int(k), min(step + int(k), data_num))]
                elif data_type == 'tag_value':
                    return [(id_encryption(encryption_type, ids, ids + 1)[0],
                             ";".join([f"x{idx}:{round(val, 4)}" for idx, val in
                                       enumerate(np.random.randn(feature_count))]))
                            for ids in range(int(k), min(step + int(k), data_num))]
                elif data_type == 'dense':
                    return [(id_encryption(encryption_type, ids, ids + 1)[0],
                             ",".join([str(round(i, 4)) for i in np.random.randn(feature_count)]))
                            for ids in range(int(k), min(step + int(k), data_num))]
data_num = end_num - start_num
step = 10000 if data_num > 10000 else int(data_num / 10)
table_list = [(f"{i * step}", f"{feature_nums}") for i in range(int(data_num / step) + start_num)]
table = sess.computing.parallelize(table_list, partition=partition, include_key=True)
table = table.flatMap(functools.partial(expand_id_range))
if label_flag:
schema = {"sid": "id", "header": ",".join(["y"] + [f"x{i}" for i in range(feature_nums)])}
else:
schema = {"sid": "id", "header": ",".join([f"x{i}" for i in range(feature_nums)])}
if data_type != "dense":
schema = None
h_table = sess.get_table(name=table_name, namespace=namespace)
if h_table:
h_table.destroy()
table_meta = sess.persistent(computing_table=table, name=table_name, namespace=namespace, schema=schema)
storage_session = sess.storage()
s_table = storage_session.get_table(namespace=table_meta.get_namespace(), name=table_meta.get_name())
if s_table.count() == data_num:
progress.set_time_percent(100)
from fate_flow.manager.data_manager import DataTableTracker
DataTableTracker.create_table_tracker(
table_name=table_name,
table_namespace=namespace,
entity_info={}
)
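    # Unlike the csv-writing helpers above, _generate_parallelize_data materialises rows
    # directly as a FATE computing table, persists it under (namespace, table_name) and
    # registers it with fate_flow's DataTableTracker, so no local file is written.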
def data_save(data_info, table_names, namespaces, partition_list):
data_count = 0
for idx, data_name in enumerate(data_info.keys()):
label_flag = True if 'guest' in data_info[data_name] else False
data_type = 'dense' if 'guest' in data_info[data_name] else host_data_type
if split_host and ('host' in data_info[data_name]):
host_end_num = int(np.ceil(host_data_size / len(data_info))) * (data_count + 1) if np.ceil(
host_data_size / len(data_info)) * (data_count + 1) <= host_data_size else host_data_size
host_start_num = int(np.ceil(host_data_size / len(data_info))) * data_count
data_count += 1
else:
host_end_num = host_data_size
host_start_num = 0
out_path = os.path.join(str(big_data_dir), data_name)
if os.path.exists(out_path) and os.path.isfile(out_path) and not parallelize:
if force:
remove_file(out_path)
else:
echo.echo('{} Already exists'.format(out_path))
continue
data_i = (idx + 1) / len(data_info)
downLoad = f'dataget [{"#" * int(24 * data_i)}{"-" * (24 - int(24 * data_i))}] {idx + 1}/{len(data_info)}'
start = time.time()
progress = data_progress(downLoad, start)
thread = threading.Thread(target=run, args=[progress])
thread.start()
try:
if 'guest' in data_info[data_name]:
if not parallelize:
_generate_dens_data(out_path, guest_start_num, guest_end_num,
guest_feature_num, label_flag, progress)
else:
_generate_parallelize_data(
guest_start_num,
guest_end_num,
guest_feature_num,
table_names[idx],
namespaces[idx],
label_flag,
data_type,
partition_list[idx],
progress)
else:
if data_type == 'tag' and not parallelize:
_generate_tag_data(out_path, host_start_num, host_end_num, host_feature_num, sparsity, progress)
elif data_type == 'tag_value' and not parallelize:
_generate_tag_value_data(out_path, host_start_num, host_end_num, host_feature_num, progress)
elif data_type == 'dense' and not parallelize:
_generate_dens_data(out_path, host_start_num, host_end_num,
host_feature_num, label_flag, progress)
elif parallelize:
_generate_parallelize_data(
host_start_num,
host_end_num,
host_feature_num,
table_names[idx],
namespaces[idx],
label_flag,
data_type,
partition_list[idx],
progress)
progress.set_switch(False)
time.sleep(1)
except Exception:
exception_id = uuid.uuid1()
echo.echo(f"exception_id={exception_id}")
LOGGER.exception(f"exception id: {exception_id}")
finally:
progress.set_switch(False)
echo.stdout_newline()
def run(p):
while p.get_switch():
time.sleep(1)
p.progress(p.get_time_percent())
    if not 0 < match_rate <= 1:
        raise Exception(f"match_rate must be in the range (0, 1], please check match_rate: {match_rate}")
guest_start_num = host_data_size - int(guest_data_size * match_rate)
guest_end_num = guest_start_num + guest_data_size
if os.path.isfile(include_path):
with include_path.open("r") as f:
testsuite_config = json.load(f)
else:
        raise Exception(f'Input file error, please check {include_path}.')
try:
if output_path is not None:
big_data_dir = os.path.abspath(output_path)
else:
big_data_dir = os.path.abspath(conf.cache_directory)
except Exception:
        raise Exception('path does not exist: {}'.format(output_path if output_path is not None else conf.cache_directory))
date_set = {}
table_name_list = []
table_namespace_list = []
partition_list = []
for upload_dict in testsuite_config.get('data'):
date_set[os.path.basename(upload_dict.get('file'))] = upload_dict.get('role')
table_name_list.append(upload_dict.get('table_name'))
table_namespace_list.append(upload_dict.get('namespace'))
partition_list.append(upload_dict.get('partition', 8))
if parallelize:
with session.Session() as sess:
session_id = str(uuid.uuid1())
sess.init_computing(session_id)
data_save(
data_info=date_set,
table_names=table_name_list,
namespaces=table_namespace_list,
partition_list=partition_list)
else:
data_save(
data_info=date_set,
table_names=table_name_list,
namespaces=table_namespace_list,
partition_list=partition_list)
    echo.echo(f'Data storage address, please check: {big_data_dir}')
| 16,053 | 45.398844 | 120 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/scripts/_options.py
|
import time
import click
from fate_test._config import parse_config, default_config
from fate_test.scripts._utils import _set_namespace
class SharedOptions(object):
_options = {
"config": (('-c', '--config'),
                   dict(type=click.Path(exists=True), help="Manually specify config file", default=None),
default_config().__str__()),
"namespace": (('-n', '--namespace'),
                      dict(type=str, help="Manually specify fate_test namespace", default=None),
time.strftime('%Y%m%d%H%M%S')),
"namespace_mangling": (('-nm', '--namespace-mangling',),
dict(type=bool, is_flag=True, help="Mangling data namespace", default=None),
False),
"yes": (('-y', '--yes',), dict(type=bool, is_flag=True, help="Skip double check", default=None),
False),
"extend_sid": (('--extend_sid', ),
dict(type=bool, is_flag=True, help="whether to append uuid as sid when uploading data",
default=None), None),
"auto_increasing_sid": (('--auto_increasing_sid', ),
dict(type=bool, is_flag=True, help="whether to generate sid value starting at 0",
default=None), None),
}
def __init__(self):
self._options_kwargs = {}
def __getitem__(self, item):
return self._options_kwargs[item]
def get(self, k, default=None):
v = self._options_kwargs.get(k, default)
if v is None and k in self._options:
v = self._options[k][2]
return v
def update(self, **kwargs):
for k, v in kwargs.items():
if v is not None:
self._options_kwargs[k] = v
def post_process(self):
# add defaults here
for k, v in self._options.items():
if self._options_kwargs.get(k, None) is None:
self._options_kwargs[k] = v[2]
# update config
config = parse_config(self._options_kwargs['config'])
self._options_kwargs['config'] = config
_set_namespace(self._options_kwargs['namespace_mangling'], self._options_kwargs['namespace'])
@classmethod
def get_shared_options(cls, hidden=False):
def shared_options(f):
for name, option in cls._options.items():
f = click.option(*option[0], **dict(option[1], hidden=hidden))(f)
return f
return shared_options
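# Minimal sketch of how SharedOptions is consumed by the cli commands in this package
# (the command name below is illustrative):
#
#   @click.command("example")
#   @SharedOptions.get_shared_options(hidden=True)
#   @click.pass_context
#   def example(ctx, **kwargs):
#       ctx.obj.update(**kwargs)
#       ctx.obj.post_process()   # fills defaults, parses the config file, sets namespace
#       config_inst = ctx.obj["config"]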
| 2,546 | 37.590909 | 113 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/scripts/performance_cli.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
import time
import uuid
from datetime import timedelta
import click
import glob
from fate_test import _config
from fate_test._client import Clients
from fate_test._config import Config
from fate_test.utils import TxtStyle
from fate_test._flow_client import JobProgress, SubmitJobResponse, QueryJobResponse
from fate_test._io import LOGGER, echo
from prettytable import PrettyTable, ORGMODE
from fate_test._parser import JSON_STRING, Testsuite
from fate_test.scripts._options import SharedOptions
from fate_test.scripts._utils import _load_testsuites, _upload_data, _delete_data, _load_module_from_script, \
_add_replace_hook
@click.command("performance")
@click.option('-t', '--job-type', type=click.Choice(['intersect', 'intersect_multi', 'hetero_lr', 'hetero_sbt']),
help="Select the job type, you can also set through include")
@click.option('-i', '--include', type=click.Path(exists=True), multiple=True, metavar="<include>",
help="include *testsuite.json under these paths")
@click.option('-r', '--replace', default="{}", type=JSON_STRING,
help="a json string represents mapping for replacing fields in data/conf/dsl")
@click.option('-m', '--timeout', type=int, default=3600,
help="maximun running time of job")
@click.option('-e', '--max-iter', type=int, help="When the algorithm model is LR, the number of iterations is set")
@click.option('-d', '--max-depth', type=int,
help="When the algorithm model is SecureBoost, set the number of model layers")
@click.option('-nt', '--num-trees', type=int, help="When the algorithm model is SecureBoost, set the number of trees")
@click.option('-p', '--task-cores', type=int, help="processors per node")
@click.option('-uj', '--update-job-parameters', default="{}", type=JSON_STRING,
help="a json string represents mapping for replacing fields in conf.job_parameters")
@click.option('-uc', '--update-component-parameters', default="{}", type=JSON_STRING,
help="a json string represents mapping for replacing fields in conf.component_parameters")
@click.option('-s', '--storage-tag', type=str,
help="tag for storing performance time consuming, for future comparison")
@click.option('-v', '--history-tag', type=str, multiple=True,
help="Extract performance time consuming from history tags for comparison")
@click.option("--skip-data", is_flag=True, default=False,
help="skip uploading data specified in testsuite")
@click.option("--provider", type=str,
help="Select the fate version, for example: [email protected]")
@click.option("--disable-clean-data", "clean_data", flag_value=False, default=None)
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def run_task(ctx, job_type, include, replace, timeout, update_job_parameters, update_component_parameters, max_iter,
max_depth, num_trees, task_cores, storage_tag, history_tag, skip_data, clean_data, provider, **kwargs):
"""
Test the performance of big data tasks, alias: bp
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
config_inst = ctx.obj["config"]
if ctx.obj["extend_sid"] is not None:
config_inst.extend_sid = ctx.obj["extend_sid"]
if ctx.obj["auto_increasing_sid"] is not None:
config_inst.auto_increasing_sid = ctx.obj["auto_increasing_sid"]
namespace = ctx.obj["namespace"]
yes = ctx.obj["yes"]
data_namespace_mangling = ctx.obj["namespace_mangling"]
if clean_data is None:
clean_data = config_inst.clean_data
def get_perf_template(conf: Config, job_type):
        perf_dir = os.path.join(os.path.abspath(conf.perf_template_dir), job_type, "*testsuite.json")
return glob.glob(perf_dir)
if not include:
include = get_perf_template(config_inst, job_type)
# prepare output dir and json hooks
_add_replace_hook(replace)
echo.welcome()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
echo.echo("loading testsuites:")
suites = _load_testsuites(includes=include, excludes=tuple(), glob=None, provider=provider)
for i, suite in enumerate(suites):
echo.echo(f"\tdataset({len(suite.dataset)}) dsl jobs({len(suite.jobs)}) {suite.path}")
if not yes and not click.confirm("running?"):
return
echo.stdout_newline()
with Clients(config_inst) as client:
for i, suite in enumerate(suites):
# noinspection PyBroadException
try:
start = time.time()
echo.echo(f"[{i + 1}/{len(suites)}]start at {time.strftime('%Y-%m-%d %X')} {suite.path}", fg='red')
if not skip_data:
try:
_upload_data(client, suite, config_inst)
except Exception as e:
raise RuntimeError(f"exception occur while uploading data for {suite.path}") from e
echo.stdout_newline()
try:
time_consuming = _submit_job(client, suite, namespace, config_inst, timeout, update_job_parameters,
storage_tag, history_tag, update_component_parameters, max_iter,
max_depth, num_trees, task_cores)
except Exception as e:
raise RuntimeError(f"exception occur while submit job for {suite.path}") from e
try:
_run_pipeline_jobs(config_inst, suite, namespace, data_namespace_mangling)
except Exception as e:
raise RuntimeError(f"exception occur while running pipeline jobs for {suite.path}") from e
echo.echo(f"[{i + 1}/{len(suites)}]elapse {timedelta(seconds=int(time.time() - start))}", fg='red')
if not skip_data and clean_data:
_delete_data(client, suite)
echo.echo(suite.pretty_final_summary(time_consuming), fg='red')
except Exception:
exception_id = uuid.uuid1()
echo.echo(f"exception in {suite.path}, exception_id={exception_id}")
LOGGER.exception(f"exception id: {exception_id}")
finally:
echo.stdout_newline()
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
def _submit_job(clients: Clients, suite: Testsuite, namespace: str, config: Config, timeout, update_job_parameters,
storage_tag, history_tag, update_component_parameters, max_iter, max_depth, num_trees, task_cores):
# submit jobs
with click.progressbar(length=len(suite.jobs),
label="jobs",
show_eta=False,
show_pos=True,
width=24) as bar:
time_list = []
for job in suite.jobs_iter():
start = time.time()
job_progress = JobProgress(job.job_name)
def _raise():
exception_id = str(uuid.uuid1())
job_progress.exception(exception_id)
suite.update_status(job_name=job.job_name, exception_id=exception_id)
echo.file(f"exception({exception_id})")
LOGGER.exception(f"exception id: {exception_id}")
# noinspection PyBroadException
try:
if max_iter is not None:
job.job_conf.update_component_parameters('max_iter', max_iter)
if max_depth is not None:
job.job_conf.update_component_parameters('max_depth', max_depth)
if num_trees is not None:
job.job_conf.update_component_parameters('num_trees', num_trees)
if task_cores is not None:
job.job_conf.update_job_common_parameters(task_cores=task_cores)
job.job_conf.update(config.parties, timeout, update_job_parameters, update_component_parameters)
except Exception:
_raise()
continue
def update_bar(n_step):
bar.item_show_func = lambda x: job_progress.show()
time.sleep(0.1)
bar.update(n_step)
update_bar(1)
def _call_back(resp: SubmitJobResponse):
if isinstance(resp, SubmitJobResponse):
job_progress.submitted(resp.job_id)
echo.file(f"[jobs] {resp.job_id} ", nl=False)
suite.update_status(job_name=job.job_name, job_id=resp.job_id)
if isinstance(resp, QueryJobResponse):
job_progress.running(resp.status, resp.progress)
update_bar(0)
# noinspection PyBroadException
try:
response = clients["guest_0"].submit_job(job=job, callback=_call_back)
# noinspection PyBroadException
try:
# add notes
notes = f"{job.job_name}@{suite.path}@{namespace}"
for role, party_id_list in job.job_conf.role.items():
for i, party_id in enumerate(party_id_list):
clients[f"{role}_{i}"].add_notes(job_id=response.job_id, role=role, party_id=party_id,
notes=notes)
except Exception:
pass
except Exception:
_raise()
else:
job_progress.final(response.status)
suite.update_status(job_name=job.job_name, status=response.status.status)
if response.status.is_success():
if suite.model_in_dep(job.job_name):
dependent_jobs = suite.get_dependent_jobs(job.job_name)
for predict_job in dependent_jobs:
model_info, table_info, cache_info, model_loader_info = None, None, None, None
for i in _config.deps_alter[predict_job.job_name]:
if isinstance(i, dict):
name = i.get('name')
data_pre = i.get('data')
if 'data_deps' in _config.deps_alter[predict_job.job_name]:
roles = list(data_pre.keys())
table_info, hierarchy = [], []
for role_ in roles:
role, index = role_.split("_")
input_ = data_pre[role_]
for data_input, cpn in input_.items():
try:
table_name = clients["guest_0"].output_data_table(
job_id=response.job_id,
role=role,
party_id=config.role[role][int(index)],
component_name=cpn)
except Exception:
_raise()
if predict_job.job_conf.dsl_version == 2:
hierarchy.append([role, index, data_input])
table_info.append({'table': table_name})
else:
hierarchy.append([role, 'args', 'data'])
table_info.append({data_input: [table_name]})
table_info = {'hierarchy': hierarchy, 'table_info': table_info}
if 'model_deps' in _config.deps_alter[predict_job.job_name]:
if predict_job.job_conf.dsl_version == 2:
# noinspection PyBroadException
try:
model_info = clients["guest_0"].deploy_model(
model_id=response.model_info["model_id"],
model_version=response.model_info["model_version"],
dsl=predict_job.job_dsl.as_dict())
except Exception:
_raise()
else:
model_info = response.model_info
if 'cache_deps' in _config.deps_alter[predict_job.job_name]:
cache_dsl = predict_job.job_dsl.as_dict()
cache_info = []
for cpn in cache_dsl.get("components").keys():
if "CacheLoader" in cache_dsl.get("components").get(cpn).get("module"):
cache_info.append({cpn: {'job_id': response.job_id}})
cache_info = {'hierarchy': [""], 'cache_info': cache_info}
if 'model_loader_deps' in _config.deps_alter[predict_job.job_name]:
model_loader_dsl = predict_job.job_dsl.as_dict()
model_loader_info = []
for cpn in model_loader_dsl.get("components").keys():
if "ModelLoader" in model_loader_dsl.get("components").get(cpn).get("module"):
model_loader_info.append({cpn: response.model_info})
model_loader_info = {'hierarchy': [""], 'model_loader_info': model_loader_info}
suite.feed_dep_info(predict_job, name, model_info=model_info, table_info=table_info,
cache_info=cache_info, model_loader_info=model_loader_info)
suite.remove_dependency(job.job_name)
update_bar(0)
time_consuming = time.time() - start
performance_dir = "/".join(
[os.path.join(os.path.abspath(config.cache_directory), 'benchmark_history', "performance.json")])
fate_version = clients["guest_0"].get_version()
if history_tag:
history_tag = ["_".join([i, job.job_name]) for i in history_tag]
comparison_quality(job.job_name, history_tag, performance_dir, time_consuming)
if storage_tag:
storage_tag = "_".join(['FATE', fate_version, storage_tag, job.job_name])
save_quality(storage_tag, performance_dir, time_consuming)
echo.stdout_newline()
time_list.append(time_consuming)
return [str(int(i)) + "s" for i in time_list]
def _run_pipeline_jobs(config: Config, suite: Testsuite, namespace: str, data_namespace_mangling: bool):
# pipeline demo goes here
job_n = len(suite.pipeline_jobs)
for i, pipeline_job in enumerate(suite.pipeline_jobs):
echo.echo(f"Running [{i + 1}/{job_n}] job: {pipeline_job.job_name}")
def _raise(err_msg, status="failed"):
exception_id = str(uuid.uuid1())
suite.update_status(job_name=job_name, exception_id=exception_id, status=status)
echo.file(f"exception({exception_id}), error message:\n{err_msg}")
# LOGGER.exception(f"exception id: {exception_id}")
job_name, script_path = pipeline_job.job_name, pipeline_job.script_path
mod = _load_module_from_script(script_path)
try:
if data_namespace_mangling:
try:
mod.main(config=config, namespace=f"_{namespace}")
suite.update_status(job_name=job_name, status="success")
except Exception as e:
_raise(e)
continue
else:
try:
mod.main(config=config)
suite.update_status(job_name=job_name, status="success")
except Exception as e:
_raise(e)
continue
except Exception as e:
_raise(e, status="not submitted")
continue
def comparison_quality(group_name, history_tags, history_info_dir, time_consuming):
    assert os.path.exists(history_info_dir), f"{history_info_dir} not found, it may have been deleted"
with open(history_info_dir, 'r') as f:
benchmark_quality = json.load(f, object_hook=dict)
benchmark_performance = {}
for history_tag in history_tags:
for tag in benchmark_quality:
if '_'.join(tag.split("_")[2:]) == history_tag:
benchmark_performance[tag] = benchmark_quality[tag]
if benchmark_performance is not None:
benchmark_performance[group_name] = time_consuming
table = PrettyTable()
table.set_style(ORGMODE)
table.field_names = ["Script Model Name", "time consuming"]
for script_model_name in benchmark_performance:
table.add_row([f"{script_model_name}"] +
[f"{TxtStyle.FIELD_VAL}{benchmark_performance[script_model_name]}{TxtStyle.END}"])
print("\n")
print(table.get_string(title=f"{TxtStyle.TITLE}Performance comparison results{TxtStyle.END}"))
print("#" * 60)
def save_quality(storage_tag, save_dir, time_consuming):
os.makedirs(os.path.dirname(save_dir), exist_ok=True)
if os.path.exists(save_dir):
with open(save_dir, 'r') as f:
benchmark_quality = json.load(f, object_hook=dict)
else:
benchmark_quality = {}
benchmark_quality.update({storage_tag: time_consuming})
try:
with open(save_dir, 'w') as fp:
json.dump(benchmark_quality, fp, indent=2)
print("\n" + "Storage successful, please check: ", save_dir)
except Exception:
print("\n" + "Storage failed, please check: ", save_dir)
| 18,821 | 50.146739 | 119 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/scripts/op_test/spdz_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import time
from prettytable import PrettyTable, ORGMODE
from flow_sdk.client import FlowClient
class SPDZTest(object):
def __init__(self, flow_address, params, conf_path, dsl_path, guest_party_id, host_party_id):
self.client = FlowClient(ip=flow_address.split(":")[0],
port=flow_address.split(":")[1],
version="v1")
self.dsl = self._get_json_file(dsl_path)
self.conf = self._get_json_file(conf_path)
self.conf["role"] = dict(guest=guest_party_id, host=host_party_id)
self.conf["component_parameters"]["common"]["spdz_test_0"].update(params)
self.conf["initiator"]["party_id"] = guest_party_id[0]
self.guest_party_id = guest_party_id[0]
@staticmethod
def _get_json_file(path):
with open(path, "r") as fin:
ret = json.loads(fin.read())
return ret
def run(self):
result = self.client.job.submit(config_data=self.conf, dsl_data=self.dsl)
try:
if 'retcode' not in result or result["retcode"] != 0:
raise ValueError(f"retcode err")
if "jobId" not in result:
raise ValueError(f"jobID not in result: {result}")
job_id = result["jobId"]
except ValueError:
raise ValueError("job submit failed, err msg: {}".format(result))
while True:
info = self.client.job.query(job_id=job_id, role="guest", party_id=self.guest_party_id)
data = info["data"][0]
status = data["f_status"]
if status == "success":
break
elif status == "failed":
raise ValueError(f"job is failed, jobid is {job_id}")
time.sleep(1)
summary = self.client.component.get_summary(job_id=job_id, role="guest",
party_id=self.guest_party_id,
component_name="spdz_test_0")
summary = summary["data"]
field_name = summary["field_name"]
tables = []
for tensor_type in summary["tensor_type"]:
table = PrettyTable()
table.set_style(ORGMODE)
table.field_names = field_name
for op_type in summary["op_test_list"]:
table.add_row(summary[tensor_type][op_type])
tables.append(table.get_string(title=f"SPDZ {tensor_type} Computational performance"))
return tables
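# Minimal usage sketch (all values below are illustrative assumptions, not defaults):
#   test = SPDZTest(flow_address="127.0.0.1:9380", params={"data_num": 10000},
#                   conf_path="spdz_conf/spdz_test_conf.json",
#                   dsl_path="spdz_conf/spdz_test_dsl.json",
#                   guest_party_id=[9999], host_party_id=[10000])
#   for table in test.run():
#       print(table)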
| 3,153 | 36.105882 | 99 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/scripts/op_test/fate_he_performance_test.py
|
import numpy as np
from prettytable import PrettyTable, ORGMODE
from fate_test.scripts.op_test.performance_assess import Metric
from operator import add, mul
class PaillierAssess(object):
def __init__(self, method, data_num, test_round):
if method == "Paillier":
from federatedml.secureprotol.fate_paillier import PaillierKeypair
self.is_ipcl = False
elif method == "IPCL":
try:
from ipcl_python import PaillierKeypair
self.is_ipcl = True
except ImportError:
raise ValueError("IPCL is not supported.")
else:
raise ValueError(f"Unsupported Paillier method: {method}.")
self.public_key, self.private_key = PaillierKeypair.generate_keypair()
self.method = method
self.data_num = data_num
self.test_round = test_round
self.float_data_x, self.encrypt_float_data_x, self.int_data_x, self.encrypt_int_data_x = self._get_data()
self.float_data_y, self.encrypt_float_data_y, self.int_data_y, self.encrypt_int_data_y = self._get_data()
def _get_data(self, type_int=True, type_float=True):
if self.method in ["Paillier", "IPCL"]:
key = self.public_key
else:
key = None
encrypt_float_data = []
encrypt_int_data = []
float_data = np.random.uniform(-1e9, 1e9, size=self.data_num)
int_data = np.random.randint(-1000, 1000, size=self.data_num)
if self.is_ipcl:
if type_float:
encrypt_float_data = key.encrypt(float_data)
if type_int:
encrypt_int_data = key.encrypt(int_data)
else:
if type_float:
for i in float_data:
encrypt_float_data.append(key.encrypt(i))
if type_int:
for i in int_data:
encrypt_int_data.append(key.encrypt(i))
return float_data, encrypt_float_data, int_data, encrypt_int_data
def output_table(self):
table = PrettyTable()
table.set_style(ORGMODE)
table.field_names = [self.method, "One time consumption", f"{self.data_num} times consumption",
"relative acc", "log2 acc", "operations per second", "plaintext consumption per second"]
metric = Metric(data_num=self.data_num, test_round=self.test_round)
table.add_row(metric.encrypt(self.float_data_x, self.public_key.encrypt, is_ipcl=self.is_ipcl))
decrypt_data = self.private_key.decrypt(self.encrypt_float_data_x) if self.is_ipcl else [
self.private_key.decrypt(i) for i in self.encrypt_float_data_x]
table.add_row(metric.decrypt(self.encrypt_float_data_x, self.float_data_x, decrypt_data,
self.private_key.decrypt, is_ipcl=self.is_ipcl))
real_data = list(map(add, self.float_data_x, self.float_data_y))
if self.is_ipcl:
encrypt_data = self.encrypt_float_data_x + self.encrypt_float_data_y
else:
encrypt_data = list(map(add, self.encrypt_float_data_x, self.encrypt_float_data_y))
self.binary_op(table, metric, self.encrypt_float_data_x, self.encrypt_float_data_y,
self.float_data_x, self.float_data_y, real_data, encrypt_data,
add, "float add")
real_data = list(map(add, self.int_data_x, self.int_data_y))
if self.is_ipcl:
encrypt_data = self.encrypt_int_data_x + self.encrypt_int_data_y
else:
encrypt_data = list(map(add, self.encrypt_int_data_x, self.encrypt_int_data_y))
self.binary_op(table, metric, self.encrypt_int_data_x, self.encrypt_int_data_y,
self.int_data_x, self.int_data_y, real_data, encrypt_data,
add, "int add")
real_data = list(map(mul, self.float_data_x, self.float_data_y))
if self.is_ipcl:
encrypt_data = self.encrypt_float_data_x * self.float_data_y
else:
encrypt_data = list(map(mul, self.encrypt_float_data_x, self.float_data_y))
self.binary_op(table, metric, self.encrypt_float_data_x, self.float_data_y,
self.float_data_x, self.float_data_y, real_data, encrypt_data,
mul, "float mul")
real_data = list(map(mul, self.int_data_x, self.int_data_y))
if self.is_ipcl:
encrypt_data = self.encrypt_int_data_x * self.int_data_y
else:
encrypt_data = list(map(mul, self.encrypt_int_data_x, self.int_data_y))
self.binary_op(table, metric, self.encrypt_int_data_x, self.int_data_y,
self.int_data_x, self.int_data_y, real_data, encrypt_data,
mul, "int mul")
return table.get_string(title=f"{self.method} Computational performance")
def binary_op(self, table, metric, encrypt_data_x, encrypt_data_y, raw_data_x, raw_data_y,
real_data, encrypt_data, op, op_name):
decrypt_data = self.private_key.decrypt(encrypt_data) if self.is_ipcl else [
self.private_key.decrypt(i) for i in encrypt_data]
table.add_row(metric.binary_op(encrypt_data_x, encrypt_data_y,
raw_data_x, raw_data_y,
real_data, decrypt_data,
op, op_name, is_ipcl=self.is_ipcl))
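# Minimal usage sketch (argument values are illustrative):
#   assess = PaillierAssess(method="Paillier", data_num=10000, test_round=1)
#   print(assess.output_table())
# method="IPCL" additionally requires the optional ipcl_python package to be installed.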
| 5,476 | 47.901786 | 117 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/scripts/op_test/performance_assess.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import numpy as np
# Operations
class Metric(object):
def __init__(self, data_num, test_round):
self.operation = None
self.data_num = data_num
self.test_round = test_round
@staticmethod
def accuracy(rand_data, decrypt_data):
difference = 0
for x, y in zip(rand_data, decrypt_data):
difference += abs(abs(x) - abs(y))
abs_acc = abs(difference) / len(rand_data)
difference = 0
for x, y in zip(rand_data, decrypt_data):
difference += abs(abs(x) - abs(y)) / (1e-100 + max(abs(x), abs(y)))
relative_acc = difference / len(rand_data)
log_acc = -np.log2(relative_acc) if relative_acc != 0 else 0
return abs_acc, relative_acc, log_acc
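    # accuracy() compares plaintext inputs with decrypted results and returns the mean
    # absolute difference, the mean relative difference and -log2(relative difference);
    # the last value roughly corresponds to the number of correct significand bits.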
@staticmethod
def many_call(data_x, unary_op=None, binary_op=None, data_y=None, test_round=1):
if unary_op is not None:
time_start = time.perf_counter()
for _ in range(test_round):
_ = list(map(unary_op, data_x))
final_time = time.perf_counter() - time_start
else:
time_start = time.time()
for _ in range(test_round):
_ = list(map(binary_op, data_x, data_y))
final_time = time.time() - time_start
return final_time / test_round
@staticmethod
def many_call_ipcl(data_x, unary_op=None, binary_op=None, data_y=None, test_round=1):
if unary_op is not None:
time_start = time.time()
for _ in range(test_round):
_ = unary_op(data_x)
final_time = time.time() - time_start
else:
time_start = time.time()
for _ in range(test_round):
_ = binary_op(data_x, data_y)
final_time = time.time() - time_start
return final_time / test_round
def encrypt(self, data, op, is_ipcl=False):
if is_ipcl:
many_round_encrypt_time = self.many_call_ipcl(data, unary_op=op, test_round=self.test_round)
else:
many_round_encrypt_time = self.many_call(data, unary_op=op, test_round=self.test_round)
single_encrypt_time = many_round_encrypt_time / self.data_num
cals_per_second = self.data_num / many_round_encrypt_time
return ["encrypt", '%.10f' % single_encrypt_time + 's', '%.10f' % many_round_encrypt_time + 's', "-", "-",
int(cals_per_second), "-"]
def decrypt(self, encrypt_data, data, decrypt_data, function, is_ipcl=False):
if is_ipcl:
many_round_decrypt_time = self.many_call_ipcl(encrypt_data, function, test_round=self.test_round)
else:
many_round_decrypt_time = self.many_call(encrypt_data, function, test_round=self.test_round)
single_decrypt_time = many_round_decrypt_time / self.data_num
cals_per_second = self.data_num / many_round_decrypt_time
abs_acc, relative_acc, log_acc = self.accuracy(data, decrypt_data)
return ["decrypt", '%.10f' % single_decrypt_time + 's', '%.10f' % many_round_decrypt_time + 's',
relative_acc, log_acc, int(cals_per_second), "-"]
def binary_op(self, encrypt_data_x, encrypt_data_y,
raw_data_x, raw_data_y, real_ret, decrypt_ret, op, op_name, is_ipcl=False):
if is_ipcl:
many_round_time = self.many_call_ipcl(data_x=encrypt_data_x, binary_op=op,
data_y=encrypt_data_y, test_round=self.test_round)
else:
many_round_time = self.many_call(data_x=encrypt_data_x, binary_op=op,
data_y=encrypt_data_y, test_round=self.test_round)
single_op_time = many_round_time / self.data_num
cals_per_second = self.data_num / many_round_time
plaintext_per_second = self.data_num / self.many_call(data_x=raw_data_x, data_y=raw_data_y,
binary_op=op, test_round=self.test_round)
abs_acc, relative_acc, log_acc = self.accuracy(real_ret, decrypt_ret)
return [op_name, '%.10f' % single_op_time + 's', '%.10f' % many_round_time + 's',
relative_acc, log_acc, int(cals_per_second), int(plaintext_per_second)]
| 4,908 | 43.225225 | 114 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/scripts/op_test/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/fate_test/fate_test/scripts/op_test/spdz_conf/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/fate_test/fate_test/flow_test/flow_rest_api.py
|
import json
import os
import shutil
import time
import numpy as np
from pathlib import Path
import requests
from contextlib import closing
from prettytable import PrettyTable, ORGMODE
from fate_test.flow_test.flow_process import Base, get_dict_from_file, download_from_request, serving_connect
class TestModel(Base):
def __init__(self, data_base_dir, server_url, component_name, namespace):
super().__init__(data_base_dir, server_url, component_name)
self.request_api_info_path = f'./logs/{namespace}/cli_exception.log'
os.makedirs(os.path.dirname(self.request_api_info_path), exist_ok=True)
def error_log(self, retmsg):
if retmsg is None:
return os.path.abspath(self.request_api_info_path)
with open(self.request_api_info_path, "a") as f:
f.write(retmsg)
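    # The request wrappers below share a convention: on success they return the server
    # retcode (0) or the requested payload; any failure is swallowed and None is returned,
    # with the server retmsg appended to cli_exception.log via error_log.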
def submit_job(self, stop=True):
post_data = {'job_runtime_conf': self.config, 'job_dsl': self.dsl}
try:
response = requests.post("/".join([self.server_url, "job", "submit"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('job submit: {}'.format(response.json().get('retmsg')) + '\n')
self.job_id = response.json().get("jobId")
self.model_id = response.json().get("data").get("model_info").get("model_id")
self.model_version = response.json().get("data").get("model_info").get("model_version")
if stop:
return
return self.query_status(self.job_id)
except Exception:
return
def job_dsl_generate(self):
post_data = {
'train_dsl': '{"components": {"data_transform_0": {"module": "DataTransform", "input": {"data": {"data": []}},'
'"output": {"data": ["train"], "model": ["data_transform"]}}}}',
'cpn_str': 'data_transform_0'}
try:
response = requests.post("/".join([self.server_url, "job", "dsl/generate"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('job dsl generate: {}'.format(response.json().get('retmsg')) + '\n')
if response.json().get('data')['components']['data_transform_0']['input']['model'][
0] == 'pipeline.data_transform_0.data_transform':
return response.json().get('retcode')
except Exception:
return
def job_api(self, command, output_path=None):
post_data = {'job_id': self.job_id, "role": "guest"}
if command == 'rerun':
try:
response = requests.post("/".join([self.server_url, "job", command]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('job rerun: {}'.format(response.json().get('retmsg')) + '\n')
return self.query_status(self.job_id)
except Exception:
return
elif command == 'stop':
self.submit_job()
time.sleep(5)
try:
response = requests.post("/".join([self.server_url, "job", command]), json={'job_id': self.job_id})
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('job stop: {}'.format(response.json().get('retmsg')) + '\n')
if self.query_job() == "canceled":
return response.json().get('retcode')
except Exception:
return
elif command == 'data/view/query':
try:
response = requests.post("/".join([self.server_url, "job", command]), json=post_data)
if response.json().get('retcode'):
self.error_log('data view query: {}'.format(response.json().get('retmsg')) + '\n')
if len(response.json().get("data")) == len(self.dsl['components'].keys()) - 1:
return response.json().get('retcode')
except Exception:
return
elif command == 'list/job':
post_data = {'limit': 3}
try:
response = requests.post("/".join([self.server_url, "job", "list/job"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('job list: {}'.format(response.json().get('retmsg')) + '\n')
if len(response.json().get('data', {}).get('jobs', [])) == post_data["limit"]:
return response.json().get('retcode')
except Exception:
return
elif command == 'log/download':
post_data = {'job_id': self.job_id}
tar_file_name = 'job_{}_log.tar.gz'.format(post_data['job_id'])
extract_dir = os.path.join(output_path, tar_file_name.replace('.tar.gz', ''))
with closing(requests.post("/".join([self.server_url, "job", command]), json=post_data, stream=True)) as response:
if response.status_code == 200:
try:
download_from_request(http_response=response, tar_file_name=tar_file_name,
extract_dir=extract_dir)
return 0
except Exception as e:
self.error_log('job log: {}'.format(e) + '\n')
return
elif command == 'clean/queue':
try:
response = requests.post("/".join([self.server_url, "job", command]))
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('clean queue: {}'.format(response.json().get('retmsg')) + '\n')
if not self.query_job(queue=True):
return response.json().get('retcode')
except Exception:
return
def query_job(self, job_id=None, queue=False):
if job_id is None:
job_id = self.job_id
time.sleep(1)
try:
if not queue:
response = requests.post("/".join([self.server_url, "job", "query"]), json={'job_id': job_id})
if response.status_code == 200 and response.json().get("data"):
status = response.json().get("data")[0].get("f_status")
return status
else:
self.error_log('query job: {}'.format(response.json().get('retmsg')) + '\n')
else:
response = requests.post("/".join([self.server_url, "job", "query"]), json={'status': 'waiting'})
if response.status_code == 200 and response.json().get("data"):
return len(response.json().get("data"))
except Exception:
return
def job_config(self, max_iter, output_path):
post_data = {
'job_id': self.job_id,
"role": "guest",
"party_id": self.guest_party_id[0],
"output_path": output_path
}
try:
response = requests.post("/".join([self.server_url, "job", "config"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('job config: {}'.format(response.json().get('retmsg')) + '\n')
job_conf = response.json().get('data')['runtime_conf']
if max_iter == job_conf['component_parameters']['common'][self.component_name]['max_iter']:
return response.json().get('retcode')
except Exception:
return
def query_task(self):
post_data = {
'job_id': self.job_id,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": self.component_name
}
try:
response = requests.post("/".join([self.server_url, "job", "task/query"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('task query: {}'.format(response.json().get('retmsg')) + '\n')
status = response.json().get("data")[0].get("f_status")
if status == "success":
return response.json().get('retcode')
except Exception:
return
def list_task(self):
post_data = {'limit': 3}
try:
response = requests.post("/".join([self.server_url, "job", "list/task"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('list task: {}'.format(response.json().get('retmsg')) + '\n')
if len(response.json().get('data', {}).get('tasks', [])) == post_data["limit"]:
return response.json().get('retcode')
except Exception:
return
def component_api(self, command, output_path=None, max_iter=None):
post_data = {
"job_id": self.job_id,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": self.component_name
}
if command == 'output/data':
tar_file_name = 'job_{}_{}_output_data.tar.gz'.format(post_data['job_id'], post_data['component_name'])
extract_dir = os.path.join(output_path, tar_file_name.replace('.tar.gz', ''))
with closing(requests.get("/".join([self.server_url, "tracking", "component/output/data/download"]),
json=post_data, stream=True)) as response:
if response.status_code == 200:
try:
download_from_request(http_response=response, tar_file_name=tar_file_name,
extract_dir=extract_dir)
return 0
except Exception as e:
self.error_log('component output data: {}'.format(e) + '\n')
return
elif command == 'output/data/table':
try:
response = requests.post("/".join([self.server_url, "tracking", "component/output/data/table"]),
json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log(
'component output data table: {}'.format(response.json().get('retmsg')) + '\n')
table = {'table_name': response.json().get("data")[0].get("table_name"),
'namespace': response.json().get("data")[0].get("namespace")}
if not self.table_api('table_info', table):
return response.json().get('retcode')
except Exception:
return
elif command == 'output/model':
try:
response = requests.post("/".join([self.server_url, "tracking", "component/output/model"]),
json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('component output model: {}'.format(response.json().get('retmsg')) + '\n')
if response.json().get("data"):
return response.json().get('retcode')
except Exception:
return
elif command == 'parameters':
try:
response = requests.post("/".join([self.server_url, "tracking", "component/parameters"]),
json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('component parameters: {}'.format(response.json().get('retmsg')) + '\n')
if response.json().get('data', {}).get('ComponentParam', {}).get('max_iter', {}) == max_iter:
return response.json().get('retcode')
except Exception:
return
elif command == 'summary/download':
try:
response = requests.post("/".join([self.server_url, "tracking", "component/summary/download"]),
json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log(
'component summary download: {}'.format(response.json().get('retmsg')) + '\n')
if response.json().get("data"):
file = output_path + '{}_summary.json'.format(self.job_id)
os.makedirs(os.path.dirname(file), exist_ok=True)
with open(file, 'w') as fp:
json.dump(response.json().get("data"), fp)
return response.json().get('retcode')
except Exception:
return
def component_metric(self, command, output_path=None):
post_data = {
"job_id": self.job_id,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": 'evaluation_0'
}
if command == 'metrics':
try:
response = requests.post("/".join([self.server_url, "tracking", "component/metrics"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('component metrics: {}'.format(response.json().get('retmsg')) + '\n')
if response.json().get("data"):
file = output_path + '{}_metrics.json'.format(self.job_id)
os.makedirs(os.path.dirname(file), exist_ok=True)
with open(file, 'w') as fp:
json.dump(response.json().get("data"), fp)
return response.json().get('retcode')
except Exception:
return
elif command == 'metric/all':
try:
response = requests.post("/".join([self.server_url, "tracking", "component/metric/all"]),
json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('component metric all: {}'.format(response.json().get('retmsg')) + '\n')
if response.json().get("data"):
file = output_path + '{}_metric_all.json'.format(self.job_id)
os.makedirs(os.path.dirname(file), exist_ok=True)
with open(file, 'w') as fp:
json.dump(response.json().get("data"), fp)
return response.json().get('retcode')
except Exception:
return
elif command == 'metric/delete':
try:
response = requests.post("/".join([self.server_url, "tracking", "component/metric/delete"]),
json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('component metric delete: {}'.format(response.json().get('retmsg')) + '\n')
response = requests.post("/".join([self.server_url, "tracking", "component/metrics"]),
json=post_data)
if response.status_code == 200:
if not response.json().get("data"):
return response.json().get('retcode')
except Exception:
return
def component_list(self):
post_data = {'job_id': self.job_id}
try:
response = requests.post("/".join([self.server_url, "tracking", "component/list"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('component list: {}'.format(response.json().get('retmsg')) + '\n')
if len(response.json().get('data')['components']) == len(list(self.dsl['components'].keys())):
return response.json().get('retcode')
except Exception:
raise
def table_api(self, command, table_name):
post_data = {
"table_name": table_name['table_name'],
"namespace": table_name['namespace']
}
if command == 'table/info':
try:
response = requests.post("/".join([self.server_url, "table", "table_info"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('table info: {}'.format(response.json().get('retmsg')) + '\n')
if response.json().get('data')['namespace'] == table_name['namespace'] and \
response.json().get('data')['table_name'] == table_name['table_name']:
return response.json().get('retcode')
except Exception:
return
elif command == 'table/delete':
try:
response = requests.post("/".join([self.server_url, "table", "delete"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('table delete: {}'.format(response.json().get('retmsg')) + '\n')
response = requests.post("/".join([self.server_url, "table", "delete"]), json=post_data)
if response.status_code == 200 and response.json().get('retcode'):
return 0
except Exception:
return
def data_upload(self, post_data, table_index=None):
post_data['file'] = str(self.data_base_dir.joinpath(post_data['file']).resolve())
post_data['drop'] = 1
post_data['use_local_data'] = 0
if table_index is not None:
post_data['table_name'] = f'{post_data["file"]}_{table_index}'
try:
response = requests.post("/".join([self.server_url, "data", "upload"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('data upload: {}'.format(response.json().get('retmsg')) + '\n')
return self.query_status(response.json().get("jobId"))
except Exception:
return
def data_download(self, table_name, output_path):
post_data = {
"table_name": table_name['table_name'],
"namespace": table_name['namespace'],
"output_path": output_path + '{}download.csv'.format(self.job_id)
}
try:
response = requests.post("/".join([self.server_url, "data", "download"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('data download: {}'.format(response.json().get('retmsg')) + '\n')
return self.query_status(response.json().get("jobId"))
except Exception:
return
def data_upload_history(self, conf_file):
self.data_upload(conf_file, table_index=1)
post_data = {"limit": 2}
try:
response = requests.post("/".join([self.server_url, "data", "upload/history"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('data upload history: {}'.format(response.json().get('retmsg')) + '\n')
if len(response.json().get('data')) == post_data["limit"]:
return response.json().get('retcode')
except Exception:
return
def tag_api(self, command, tag_name=None, new_tag_name=None):
post_data = {
"tag_name": tag_name
}
if command == 'tag/retrieve':
try:
response = requests.post("/".join([self.server_url, "model", "tag/retrieve"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('tag retrieve: {}'.format(response.json().get('retmsg')) + '\n')
if not response.json().get('retcode'):
return response.json().get('data')['tags'][0]['name']
except Exception:
return
elif command == 'tag/create':
try:
response = requests.post("/".join([self.server_url, "model", "tag/create"]), json=post_data)
if response.status_code == 200:
self.error_log('tag create: {}'.format(response.json().get('retmsg')) + '\n')
if self.tag_api('tag/retrieve', tag_name=tag_name) == tag_name:
return 0
except Exception:
return
elif command == 'tag/destroy':
try:
response = requests.post("/".join([self.server_url, "model", "tag/destroy"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('tag destroy: {}'.format(response.json().get('retmsg')) + '\n')
if not self.tag_api('tag/retrieve', tag_name=tag_name):
return 0
except Exception:
return
elif command == 'tag/update':
post_data = {
"tag_name": tag_name,
"new_tag_name": new_tag_name
}
try:
response = requests.post("/".join([self.server_url, "model", "tag/update"]), json=post_data)
if response.status_code == 200:
self.error_log('tag update: {}'.format(response.json().get('retmsg')) + '\n')
if self.tag_api('tag/retrieve', tag_name=new_tag_name) == new_tag_name:
return 0
except Exception:
return
elif command == 'tag/list':
post_data = {"limit": 1}
try:
response = requests.post("/".join([self.server_url, "model", "tag/list"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('tag list: {}'.format(response.json().get('retmsg')) + '\n')
if len(response.json().get('data')['tags']) == post_data['limit']:
return response.json().get('retcode')
except Exception:
return
def model_api(
self,
command,
output_path=None,
remove_path=None,
model_path=None,
homo_deploy_path=None,
homo_deploy_kube_config_path=None,
arbiter_party_id=None,
tag_name=None,
model_load_conf=None,
servings=None):
if model_load_conf is not None:
model_load_conf["job_parameters"].update({"model_id": self.model_id,
"model_version": self.model_version})
if command == 'model/load':
try:
response = requests.post("/".join([self.server_url, "model", "load"]), json=model_load_conf)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('model load: {}'.format(response.json().get('retmsg')) + '\n')
return response.json().get('retcode')
except Exception:
return
elif command == 'model/bind':
service_id = "".join([str(i) for i in np.random.randint(9, size=8)])
            model_load_conf.update({"service_id": service_id, "servings": [servings]})
            post_data = model_load_conf
try:
response = requests.post("/".join([self.server_url, "model", "bind"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('model bind: {}'.format(response.json().get('retmsg')) + '\n')
return response.json().get('retcode')
except Exception:
return
elif command == 'model/import':
config_data = {
"model_id": self.model_id,
"model_version": self.model_version,
"role": "guest",
"party_id": self.guest_party_id[0],
"file": model_path,
"force_update": 1,
}
try:
remove_path = Path(remove_path + self.model_version)
if os.path.exists(model_path):
files = {'file': open(model_path, 'rb')}
else:
return
if os.path.isdir(remove_path):
shutil.rmtree(remove_path)
response = requests.post("/".join([self.server_url, "model", "import"]), data=config_data, files=files)
if response.status_code == 200:
if os.path.isdir(remove_path):
return 0
except Exception:
return
elif command == 'model/export':
post_data = {
"model_id": self.model_id,
"model_version": self.model_version,
"role": "guest",
"party_id": self.guest_party_id[0],
}
tar_file_name = '{}_{}_model_export.zip'.format(post_data['model_id'], post_data['model_version'])
archive_file_path = os.path.join(output_path, tar_file_name)
with closing(requests.get("/".join([self.server_url, "model", "export"]), json=post_data,
stream=True)) as response:
if response.status_code == 200:
try:
with open(archive_file_path, 'wb') as fw:
for chunk in response.iter_content(1024):
if chunk:
fw.write(chunk)
except Exception:
return
return 0, archive_file_path
elif command == 'model/migrate':
post_data = {
"job_parameters": {
"federated_mode": "MULTIPLE"
},
"migrate_initiator": {
"role": "guest",
"party_id": self.guest_party_id[0]
},
"role": {
"guest": self.guest_party_id,
"arbiter": arbiter_party_id,
"host": self.host_party_id
},
"migrate_role": {
"guest": self.guest_party_id,
"arbiter": arbiter_party_id,
"host": self.host_party_id
},
"execute_party": {
"guest": self.guest_party_id,
"arbiter": arbiter_party_id,
"host": self.host_party_id
},
"model_id": self.model_id,
"model_version": self.model_version,
"unify_model_version": self.job_id + '_01'
}
try:
response = requests.post("/".join([self.server_url, "model", "migrate"]), json=post_data)
if response.status_code == 200:
self.error_log('model migrate: {}'.format(response.json().get('retmsg')) + '\n')
return response.json().get("retcode")
except Exception:
return
elif command == 'model/homo/convert':
post_data = {
'model_id': self.model_id,
"model_version": self.model_version,
"role": "guest",
"party_id": self.guest_party_id[0],
}
try:
response = requests.post("/".join([self.server_url, "model", "homo/convert"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('model homo convert: {}'.format(response.json().get('retmsg')) + '\n')
return response.json().get("retcode")
except Exception:
return
elif command == 'model/homo/deploy':
job_data = {
"model_id": self.model_id,
"model_version": self.model_version,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": self.component_name
}
config_data = get_dict_from_file(homo_deploy_path)
config_data.update(job_data)
if homo_deploy_kube_config_path:
with open(homo_deploy_kube_config_path, 'r') as fp:
config_data['deployment_parameters']['config_file_content'] = fp.read()
config_data['deployment_parameters'].pop('config_file', None)
try:
response = requests.post("/".join([self.server_url, "model", "homo/deploy"]), json=config_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('model homo deploy: {}'.format(response.json().get('retmsg')) + '\n')
return response.json().get("retcode")
except Exception:
return
elif command == 'model_tag/create':
post_data = {
"job_id": self.job_id,
"tag_name": tag_name
}
try:
response = requests.post("/".join([self.server_url, "model", "model_tag/create"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('model tag create: {}'.format(response.json().get('retmsg')) + '\n')
if self.model_api('model_tag/retrieve')[0].get('name') == post_data['tag_name']:
return 0
except Exception:
return
elif command == 'model_tag/remove':
post_data = {
"job_id": self.job_id,
"tag_name": tag_name
}
try:
response = requests.post("/".join([self.server_url, "model", "model_tag/remove"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('model tag remove: {}'.format(response.json().get('retmsg')) + '\n')
if not len(self.model_api('model_tag/retrieve')):
return 0
except Exception:
return
elif command == 'model_tag/retrieve':
post_data = {
"job_id": self.job_id
}
try:
response = requests.post("/".join([self.server_url, "model", "model_tag/retrieve"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('model tag retrieve: {}'.format(response.json().get('retmsg')) + '\n')
return response.json().get('data')['tags']
except Exception:
return
elif command == 'model/deploy':
post_data = {
"model_id": self.model_id,
"model_version": self.model_version
}
try:
response = requests.post("/".join([self.server_url, "model", "deploy"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('model deploy: {}'.format(response.json().get('retmsg')) + '\n')
if response.json().get('data')['model_id'] == self.model_id and \
response.json().get('data')['model_version'] != self.model_version:
self.model_id = response.json().get('data')['model_id']
self.model_version = response.json().get('data')['model_version']
self.job_id = response.json().get('data')['model_version']
return response.json().get('retcode')
except Exception:
return
elif command == 'model/conf':
post_data = {
"model_id": self.model_id,
"model_version": self.model_version
}
try:
response = requests.post("/".join([self.server_url, "model", "get/predict/conf"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('model conf: {}'.format(response.json().get('retmsg')) + '\n')
if response.json().get('data'):
if response.json().get('data')['job_parameters']['common']['model_id'] == post_data['model_id']\
and response.json().get('data')['job_parameters']['common']['model_version'] == \
post_data['model_version'] and response.json().get('data')['initiator']['party_id'] == \
self.guest_party_id[0] and response.json().get('data')['initiator']['role'] == 'guest':
return response.json().get('retcode')
except Exception:
return
elif command == 'model/dsl':
post_data = {
"model_id": self.model_id,
"model_version": self.model_version
}
try:
response = requests.post("/".join([self.server_url, "model", "get/predict/dsl"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('model dsl: {}'.format(response.json().get('retmsg')) + '\n')
model_dsl_cpn = list(response.json().get('data')['components'].keys())
train_dsl_cpn = list(self.dsl['components'].keys())
if len([k for k in model_dsl_cpn if k in train_dsl_cpn]) == len(train_dsl_cpn):
return response.json().get('retcode')
except Exception:
return
elif command == 'model/query':
post_data = {
"model_id": self.model_id,
"model_version": self.model_version,
"role": "guest",
"party_id": self.guest_party_id[0]
}
try:
response = requests.post("/".join([self.server_url, "model", "query"]), json=post_data)
if response.status_code == 200:
if response.json().get('retcode'):
self.error_log('model query: {}'.format(response.json().get('retmsg')) + '\n')
if response.json().get('data')[0].get('f_model_id') == post_data['model_id'] and \
response.json().get('data')[0].get('f_model_version') == post_data['model_version'] and \
response.json().get('data')[0].get('f_role') == post_data['role'] and \
response.json().get('data')[0].get('f_party_id') == str(post_data['party_id']):
return response.json().get('retcode')
except Exception:
return
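    # Poll the submitted job every 5 seconds; return 0 once the job succeeds,
    # return None as soon as it leaves the waiting/running/success states.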
def query_status(self, job_id):
while True:
time.sleep(5)
status = self.query_job(job_id=job_id)
if status and status in ["waiting", "running", "success"]:
if status and status == "success":
return 0
else:
return
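# A retcode of 0 (but not None) counts as success; anything else, including a
# missing retcode, is reported as failed in the result tables.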
def judging_state(retcode):
if not retcode and retcode is not None:
return 'success'
else:
return 'failed'
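# Exercise every FATE Flow REST endpoint group (data, table, job, task, tag,
# component, model, queue) against a running cluster and print one PrettyTable
# report per group.
# Usage sketch (assuming a fate_test style config dict; file name is illustrative):
#     config_json = get_dict_from_file('flow_test_config.json')
#     run_test_api(config_json, namespace='flow_test_rest')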
def run_test_api(config_json, namespace):
output_path = './output/flow_test_data/'
os.makedirs(os.path.dirname(output_path), exist_ok=True)
output_path = str(os.path.abspath(output_path)) + '/'
guest_party_id = config_json['guest_party_id']
host_party_id = config_json['host_party_id']
arbiter_party_id = config_json['arbiter_party_id']
train_conf_path = config_json['train_conf_path']
train_dsl_path = config_json['train_dsl_path']
upload_file_path = config_json['upload_file_path']
model_file_path = config_json['model_file_path']
remove_path = str(config_json['data_base_dir']).split("python")[
0] + '/fateflow/model_local_cache/guest#{}#arbiter-{}#guest-{}#host-{}#model/'.format(
guest_party_id[0], arbiter_party_id[0], guest_party_id[0], host_party_id[0])
serving_connect_bool = serving_connect(config_json['serving_setting'])
test_api = TestModel(config_json['data_base_dir'], config_json['server_url'],
component_name=config_json['component_name'], namespace=namespace)
job_conf = test_api.set_config(guest_party_id, host_party_id, arbiter_party_id, train_conf_path)
max_iter = job_conf['component_parameters']['common'][config_json['component_name']]['max_iter']
test_api.set_dsl(train_dsl_path)
conf_file = get_dict_from_file(upload_file_path)
data = PrettyTable()
data.set_style(ORGMODE)
data.field_names = ['data api name', 'status']
data.add_row(['data upload', judging_state(test_api.data_upload(conf_file))])
data.add_row(['data download', judging_state(test_api.data_download(conf_file, output_path))])
data.add_row(['data upload history', judging_state(test_api.data_upload_history(conf_file))])
print(data.get_string(title="data api"))
table = PrettyTable()
table.set_style(ORGMODE)
table.field_names = ['table api name', 'status']
table.add_row(['table info', judging_state(test_api.table_api('table/info', conf_file))])
table.add_row(['delete table', judging_state(test_api.table_api('table/delete', conf_file))])
print(table.get_string(title="table api"))
job = PrettyTable()
job.set_style(ORGMODE)
job.field_names = ['job api name', 'status']
job.add_row(['job stop', judging_state(test_api.job_api('stop'))])
job.add_row(['job rerun', judging_state(test_api.job_api('rerun'))])
job.add_row(['job submit', judging_state(test_api.submit_job(stop=False))])
job.add_row(['job query', judging_state(False if test_api.query_job() == "success" else True)])
job.add_row(['job data view', judging_state(test_api.job_api('data/view/query'))])
job.add_row(['job list', judging_state(test_api.job_api('list/job'))])
job.add_row(['job config', judging_state(test_api.job_config(max_iter=max_iter, output_path=output_path))])
job.add_row(['job log', judging_state(test_api.job_api('log/download', output_path))])
job.add_row(['job dsl generate', judging_state(test_api.job_dsl_generate())])
print(job.get_string(title="job api"))
task = PrettyTable()
task.set_style(ORGMODE)
task.field_names = ['task api name', 'status']
task.add_row(['task list', judging_state(test_api.list_task())])
task.add_row(['task query', judging_state(test_api.query_task())])
print(task.get_string(title="task api"))
tag = PrettyTable()
tag.set_style(ORGMODE)
tag.field_names = ['tag api name', 'status']
tag.add_row(['create tag', judging_state(test_api.tag_api('tag/create', 'create_job_tag'))])
tag.add_row(['update tag', judging_state(test_api.tag_api('tag/update', 'create_job_tag', 'update_job_tag'))])
tag.add_row(['list tag', judging_state(test_api.tag_api('tag/list'))])
tag.add_row(
['retrieve tag', judging_state(not test_api.tag_api('tag/retrieve', 'update_job_tag') == 'update_job_tag')])
tag.add_row(['destroy tag', judging_state(test_api.tag_api('tag/destroy', 'update_job_tag'))])
print(tag.get_string(title="tag api"))
component = PrettyTable()
component.set_style(ORGMODE)
component.field_names = ['component api name', 'status']
component.add_row(['output data', judging_state(test_api.component_api('output/data', output_path=output_path))])
component.add_row(['output table', judging_state(test_api.component_api('output/data/table'))])
component.add_row(['output model', judging_state(test_api.component_api('output/model'))])
component.add_row(['component parameters', judging_state(test_api.component_api('parameters', max_iter=max_iter))])
component.add_row(
['component summary', judging_state(test_api.component_api('summary/download', output_path=output_path))])
component.add_row(['component list', judging_state(test_api.component_list())])
component.add_row(['metrics', judging_state(
test_api.component_metric('metrics', output_path=output_path))])
component.add_row(['metrics all', judging_state(
test_api.component_metric('metric/all', output_path=output_path))])
model = PrettyTable()
model.set_style(ORGMODE)
model.field_names = ['model api name', 'status']
if config_json.get('component_is_homo'):
homo_deploy_path = config_json.get('homo_deploy_path')
homo_deploy_kube_config_path = config_json.get('homo_deploy_kube_config_path')
model.add_row(['model homo convert', judging_state(test_api.model_api('model/homo/convert'))])
model.add_row(['model homo deploy',
judging_state(test_api.model_api('model/homo/deploy',
homo_deploy_path=homo_deploy_path,
homo_deploy_kube_config_path=homo_deploy_kube_config_path))])
if not config_json.get('component_is_homo') and serving_connect_bool:
model_load_conf = get_dict_from_file(model_file_path)
model_load_conf["initiator"]["party_id"] = guest_party_id
model_load_conf["role"].update(
{"guest": [guest_party_id], "host": [host_party_id], "arbiter": [arbiter_party_id]})
model.add_row(['model load', judging_state(test_api.model_api('model/load', model_load_conf=model_load_conf))])
model.add_row(['model bind', judging_state(test_api.model_api('model/bind', model_load_conf=model_load_conf,
servings=config_json['serving_setting']))])
status, model_path = test_api.model_api('model/export', output_path=output_path)
model.add_row(['model export', judging_state(status)])
model.add_row(['model import', (judging_state(
test_api.model_api('model/import', remove_path=remove_path, model_path=model_path)))])
model.add_row(
['model_tag create', judging_state(test_api.model_api('model_tag/create', tag_name='model_tag_create'))])
model.add_row(
['model_tag remove', judging_state(test_api.model_api('model_tag/remove', tag_name='model_tag_create'))])
model.add_row(['model_tag retrieve', judging_state(len(test_api.model_api('model_tag/retrieve')))])
if serving_connect_bool:
model.add_row(
['model migrate', judging_state(test_api.model_api('model/migrate', arbiter_party_id=arbiter_party_id))])
model.add_row(['model query', judging_state(test_api.model_api('model/query'))])
model.add_row(['model deploy', judging_state(test_api.model_api('model/deploy'))])
model.add_row(['model conf', judging_state(test_api.model_api('model/conf'))])
model.add_row(['model dsl', judging_state(test_api.model_api('model/dsl'))])
print(model.get_string(title="model api"))
component.add_row(['metrics delete', judging_state(
test_api.component_metric('metric/delete', output_path=output_path))])
print(component.get_string(title="component api"))
queue = PrettyTable()
queue.set_style(ORGMODE)
queue.field_names = ['api name', 'status']
test_api.submit_job()
test_api.submit_job()
test_api.submit_job()
queue.add_row(['clean/queue', judging_state(test_api.job_api('clean/queue'))])
print(queue.get_string(title="queue job"))
print('Please check the error content: {}'.format(test_api.error_log(None)))
| 45,274 | 48.972406 | 126 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/flow_test/flow_process.py
|
import json
import os
import tarfile
import time
import subprocess
from contextlib import closing
from datetime import datetime
import requests
def get_dict_from_file(file_name):
with open(file_name, 'r', encoding='utf-8') as f:
json_info = json.load(f)
return json_info
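# Probe the FATE Serving address ("host:port") with telnet and report whether a
# TCP connection can be established.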
def serving_connect(serving_setting):
subp = subprocess.Popen([f'echo "" | telnet {serving_setting.split(":")[0]} {serving_setting.split(":")[1]}'],
shell=True, stdout=subprocess.PIPE)
stdout, stderr = subp.communicate()
stdout = stdout.decode("utf-8")
return True if f'Connected to {serving_setting.split(":")[0]}' in stdout else False
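# Thin wrapper around the FATE Flow job/tracking REST API shared by the train
# and predict drivers below.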
class Base(object):
def __init__(self, data_base_dir, server_url, component_name):
self.config = None
self.dsl = None
self.guest_party_id = None
self.host_party_id = None
self.job_id = None
self.model_id = None
self.model_version = None
self.data_base_dir = data_base_dir
self.server_url = server_url
self.component_name = component_name
def set_config(self, guest_party_id, host_party_id, arbiter_party_id, path):
self.config = get_dict_from_file(path)
self.config["initiator"]["party_id"] = guest_party_id[0]
self.config["role"]["guest"] = guest_party_id
self.config["role"]["host"] = host_party_id
if "arbiter" in self.config["role"]:
self.config["role"]["arbiter"] = arbiter_party_id
self.guest_party_id = guest_party_id
self.host_party_id = host_party_id
return self.config
def set_dsl(self, path):
self.dsl = get_dict_from_file(path)
return self.dsl
def submit(self):
post_data = {'job_runtime_conf': self.config, 'job_dsl': self.dsl}
print(f"start submit job, data:{post_data}")
response = requests.post("/".join([self.server_url, "job", "submit"]), json=post_data)
if response.status_code == 200 and not response.json().get('retcode'):
self.job_id = response.json().get("jobId")
print(f"submit job success: {response.json()}")
self.model_id = response.json().get("data").get("model_info").get("model_id")
self.model_version = response.json().get("data").get("model_info").get("model_version")
return True
else:
print(f"submit job failed: {response.text}")
return False
def query_job(self):
post_data = {'job_id': self.job_id}
response = requests.post("/".join([self.server_url, "job", "query"]), json=post_data)
if response.status_code == 200:
if response.json().get("data"):
return response.json().get("data")[0].get("f_status")
return False
def wait_success(self, timeout=60 * 10):
for i in range(timeout // 10):
time.sleep(10)
status = self.query_job()
print("job {} status is {}".format(self.job_id, status))
if status and status == "success":
return True
if status and status in ["canceled", "timeout", "failed"]:
return False
return False
def get_component_output_data(self, output_path=None):
post_data = {
"job_id": self.job_id,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": self.component_name
}
if not output_path:
output_path = './output/data'
os.makedirs(os.path.dirname(output_path), exist_ok=True)
tar_file_name = 'job_{}_{}_{}_{}_output_data.tar.gz'.format(post_data['job_id'], post_data['component_name'],
post_data['role'], post_data['party_id'])
extract_dir = os.path.join(output_path, tar_file_name.replace('.tar.gz', ''))
print("start get component output dat")
with closing(
requests.get("/".join([self.server_url, "tracking", "component/output/data/download"]), json=post_data,
stream=True)) as response:
if response.status_code == 200:
try:
download_from_request(http_response=response, tar_file_name=tar_file_name, extract_dir=extract_dir)
print(f'get component output path {extract_dir}')
except BaseException:
print(f"get component output data failed")
return False
def get_output_data_table(self):
post_data = {
"job_id": self.job_id,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": self.component_name
}
response = requests.post("/".join([self.server_url, "tracking", "component/output/data/table"]), json=post_data)
result = {}
try:
if response.status_code == 200:
result["name"] = response.json().get("data")[0].get("table_name")
result["namespace"] = response.json().get("data")[0].get("namespace")
except Exception as e:
raise RuntimeError(f"output data table error: {response}") from e
return result
def get_table_info(self, table_name):
post_data = {
"name": table_name['name'],
"namespace": table_name['namespace']
}
response = requests.post("/".join([self.server_url, "table", "table_info"]), json=post_data)
try:
if response.status_code == 200:
table_count = response.json().get("data").get("count")
else:
raise RuntimeError(f"get table info failed: {response}")
except Exception as e:
raise RuntimeError(f"get table count error: {response}") from e
return table_count
def get_auc(self):
post_data = {
"job_id": self.job_id,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": "evaluation_0"
}
response = requests.post("/".join([self.server_url, "tracking", "component/metric/all"]), json=post_data)
try:
if response.status_code == 200:
auc = response.json().get("data").get("train").get(self.component_name).get("data")[0][1]
else:
raise RuntimeError(f"get metrics failed: {response}")
except Exception as e:
raise RuntimeError(f"get table count error: {response}") from e
return auc
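# Training-side helpers: dump the evaluation metrics and the trained model of
# the target component to local JSON files.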
class TrainLRModel(Base):
def get_component_metrics(self, metric_output_path, file=None):
post_data = {
"job_id": self.job_id,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": "evaluation_0"
}
response = requests.post("/".join([self.server_url, "tracking", "component/metric/all"]), json=post_data)
if response.status_code == 200:
if response.json().get("data"):
if not file:
file = metric_output_path.format(self.job_id)
os.makedirs(os.path.dirname(file), exist_ok=True)
with open(file, 'w') as fp:
json.dump(response.json().get("data"), fp)
print(f"save component metrics success, path is:{os.path.abspath(file)}")
else:
print(f"get component metrics:{response.json()}")
return False
def get_component_output_model(self, model_output_path, file=None):
post_data = {
"job_id": self.job_id,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": self.component_name
}
print(f"request component output model: {post_data}")
response = requests.post("/".join([self.server_url, "tracking", "component/output/model"]), json=post_data)
if response.status_code == 200:
if response.json().get("data"):
if not file:
file = model_output_path.format(self.job_id)
os.makedirs(os.path.dirname(file), exist_ok=True)
with open(file, 'w') as fp:
json.dump(response.json().get("data"), fp)
print(f"save component output model success, path is:{os.path.abspath(file)}")
else:
print(f"get component output model:{response.json()}")
return False
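# Predict-side helper: reuse the training runtime conf and point it at the
# deployed model_id / model_version.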
class PredictLRMode(Base):
def set_predict(self, guest_party_id, host_party_id, arbiter_party_id, model_id, model_version, path):
self.set_config(guest_party_id, host_party_id, arbiter_party_id, path)
if self.config["job_parameters"].get("common"):
self.config["job_parameters"]["common"]["model_id"] = model_id
self.config["job_parameters"]["common"]["model_version"] = model_version
else:
self.config["job_parameters"]["model_id"] = model_id
self.config["job_parameters"]["model_version"] = model_version
def download_from_request(http_response, tar_file_name, extract_dir):
with open(tar_file_name, 'wb') as fw:
for chunk in http_response.iter_content(1024):
if chunk:
fw.write(chunk)
tar = tarfile.open(tar_file_name, "r:gz")
file_names = tar.getnames()
for file_name in file_names:
tar.extract(file_name, extract_dir)
tar.close()
os.remove(tar_file_name)
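# Submit the training job, wait for completion, save metrics/model/output data,
# check the AUC against the expected constant and return the output table count.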
def train_job(data_base_dir, guest_party_id, host_party_id, arbiter_party_id, train_conf_path, train_dsl_path,
server_url, component_name, metric_output_path, model_output_path, constant_auc):
train = TrainLRModel(data_base_dir, server_url, component_name)
train.set_config(guest_party_id, host_party_id, arbiter_party_id, train_conf_path)
train.set_dsl(train_dsl_path)
status = train.submit()
if status:
is_success = train.wait_success(timeout=600)
if is_success:
train.get_component_metrics(metric_output_path)
train.get_component_output_model(model_output_path)
train.get_component_output_data()
train_auc = train.get_auc()
assert abs(constant_auc - train_auc) <= 1e-4, 'The training result is wrong, auc: {}'.format(train_auc)
train_data_count = train.get_table_info(train.get_output_data_table())
return train, train_data_count
return False
def predict_job(data_base_dir, guest_party_id, host_party_id, arbiter_party_id, predict_conf_path, predict_dsl_path,
model_id, model_version, server_url, component_name):
predict = PredictLRMode(data_base_dir, server_url, component_name)
predict.set_predict(guest_party_id, host_party_id, arbiter_party_id, model_id, model_version, predict_conf_path)
predict.set_dsl(predict_dsl_path)
status = predict.submit()
if status:
is_success = predict.wait_success(timeout=600)
if is_success:
predict.get_component_output_data()
predict_data_count = predict.get_table_info(predict.get_output_data_table())
return predict, predict_data_count
return False
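# Model lifecycle helper: deploy the trained model, load and bind it on
# FATE Serving, then call the online inference endpoint.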
class UtilizeModel:
def __init__(self, model_id, model_version, server_url):
self.model_id = model_id
self.model_version = model_version
self.deployed_model_version = None
self.service_id = None
self.server_url = server_url
def deploy_model(self):
post_data = {
"model_id": self.model_id,
"model_version": self.model_version
}
response = requests.post("/".join([self.server_url, "model", "deploy"]), json=post_data)
print(f'Request data of deploy model request: {json.dumps(post_data, indent=4)}')
if response.status_code == 200:
resp_data = response.json()
print(f'Response of model deploy request: {json.dumps(resp_data, indent=4)}')
if resp_data.get("retcode", 100) == 0:
self.deployed_model_version = resp_data.get("data", {}).get("model_version")
else:
raise Exception(f"Model {self.model_id} {self.model_version} deploy failed, "
f"details: {resp_data.get('retmsg')}")
else:
raise Exception(f"Request model deploy api failed, status code: {response.status_code}")
def load_model(self):
post_data = {
"job_id": self.deployed_model_version
}
response = requests.post("/".join([self.server_url, "model", "load"]), json=post_data)
print(f'Request data of load model request: {json.dumps(post_data, indent=4)}')
if response.status_code == 200:
resp_data = response.json()
print(f'Response of load model request: {json.dumps(resp_data, indent=4)}')
if not resp_data.get('retcode'):
return True
raise Exception(f"Load model {self.model_id} {self.deployed_model_version} failed, "
f"details: {resp_data.get('retmsg')}")
raise Exception(f"Request model load api failed, status code: {response.status_code}")
def bind_model(self):
post_data = {
"job_id": self.deployed_model_version,
"service_id": f"auto_test_{datetime.strftime(datetime.now(), '%Y%m%d%H%M%S')}"
}
response = requests.post("/".join([self.server_url, "model", "bind"]), json=post_data)
print(f'Request data of bind model request: {json.dumps(post_data, indent=4)}')
if response.status_code == 200:
resp_data = response.json()
print(f'Response data of bind model request: {json.dumps(resp_data, indent=4)}')
if not resp_data.get('retcode'):
self.service_id = post_data.get('service_id')
return True
raise Exception(f"Bind model {self.model_id} {self.deployed_model_version} failed, "
f"details: {resp_data.get('retmsg')}")
raise Exception(f"Request model bind api failed, status code: {response.status_code}")
def online_predict(self, online_serving, phone_num):
serving_url = f"http://{online_serving}/federation/1.0/inference"
post_data = {
"head": {
"serviceId": self.service_id
},
"body": {
"featureData": {
"phone_num": phone_num,
},
"sendToRemoteFeatureData": {
"device_type": "imei",
"phone_num": phone_num,
"encrypt_type": "raw"
}
}
}
headers = {"Content-Type": "application/json"}
response = requests.post(serving_url, json=post_data, headers=headers)
print(f"Request data of online predict request: {json.dumps(post_data, indent=4)}")
if response.status_code == 200:
print(f"Online predict successfully, response: {json.dumps(response.json(), indent=4)}")
else:
print(f"Online predict successfully, details: {response.text}")
def run_fate_flow_test(config_json):
data_base_dir = config_json['data_base_dir']
guest_party_id = config_json['guest_party_id']
host_party_id = config_json['host_party_id']
arbiter_party_id = config_json['arbiter_party_id']
train_conf_path = config_json['train_conf_path']
train_dsl_path = config_json['train_dsl_path']
server_url = config_json['server_url']
online_serving = config_json['online_serving']
constant_auc = config_json['train_auc']
component_name = config_json['component_name']
metric_output_path = config_json['metric_output_path']
model_output_path = config_json['model_output_path']
serving_connect_bool = serving_connect(config_json['serving_setting'])
phone_num = config_json['phone_num']
print('submit train job')
# train
train, train_count = train_job(data_base_dir, guest_party_id, host_party_id, arbiter_party_id, train_conf_path,
train_dsl_path, server_url, component_name, metric_output_path, model_output_path, constant_auc)
if not train:
print('train job run failed')
return False
print('train job success')
# deploy
print('start deploy model')
utilize = UtilizeModel(train.model_id, train.model_version, server_url)
utilize.deploy_model()
print('deploy model success')
# predict
predict_conf_path = config_json['predict_conf_path']
predict_dsl_path = config_json['predict_dsl_path']
model_id = train.model_id
model_version = utilize.deployed_model_version
print('start submit predict job')
predict, predict_count = predict_job(data_base_dir, guest_party_id, host_party_id, arbiter_party_id, predict_conf_path,
predict_dsl_path, model_id, model_version, server_url, component_name)
if not predict:
print('predict job run failed')
return False
if train_count != predict_count:
        print('predict data count does not match train data count')
return False
print('predict job success')
if not config_json.get('component_is_homo') and serving_connect_bool:
# load model
utilize.load_model()
# bind model
utilize.bind_model()
# online predict
utilize.online_predict(online_serving=online_serving, phone_num=phone_num)
| 17,623 | 42.516049 | 131 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/flow_test/flow_cli_api.py
|
import json
import os
import sys
import shutil
import time
import subprocess
import numpy as np
from pathlib import Path
from prettytable import PrettyTable, ORGMODE
from fate_test.flow_test.flow_process import get_dict_from_file, serving_connect
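# CLI variant of the flow API tests: every call shells out to
# fate_flow_client.py through subprocess instead of using the REST API directly.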
class TestModel(object):
def __init__(self, data_base_dir, fate_flow_path, component_name, namespace):
self.conf_path = None
self.dsl_path = None
self.job_id = None
self.model_id = None
self.model_version = None
self.guest_party_id = None
self.host_party_id = None
self.arbiter_party_id = None
self.output_path = None
self.cache_directory = None
self.data_base_dir = data_base_dir
self.fate_flow_path = fate_flow_path
self.component_name = component_name
self.python_bin = sys.executable or 'python3'
self.request_api_info_path = f'./logs/{namespace}/cli_exception.log'
os.makedirs(os.path.dirname(self.request_api_info_path), exist_ok=True)
def error_log(self, retmsg):
if retmsg is None:
return os.path.abspath(self.request_api_info_path)
with open(self.request_api_info_path, "a") as f:
f.write(retmsg)
def submit_job(self, stop=True):
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "submit_job", "-d", self.dsl_path,
"-c", self.conf_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('job submit: {}'.format(stdout.get('retmsg')) + '\n')
self.job_id = stdout.get("jobId")
self.model_id = stdout.get("data").get("model_info").get("model_id")
self.model_version = stdout.get("data").get("model_info").get("model_version")
if stop:
return
return self.query_status()
except Exception:
return
def job_api(self, command):
if command == 'stop_job':
self.submit_job()
time.sleep(5)
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('job stop: {}'.format(stdout.get('retmsg')) + '\n')
if self.query_job() == "canceled":
return stdout.get('retcode')
except Exception:
return
elif command == 'job_log_download':
log_file_dir = os.path.join(self.output_path, 'job_{}_log'.format(self.job_id))
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id, "-o",
log_file_dir], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('job log: {}'.format(stdout.get('retmsg')) + '\n')
return stdout.get('retcode')
except Exception:
return
elif command == 'data_view_query':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id,
"-r", "guest"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
                    self.error_log('data view query: {}'.format(stdout.get('retmsg')) + '\n')
if len(stdout.get("data")) == len(list(get_dict_from_file(self.dsl_path)['components'].keys())) - 1:
return stdout.get('retcode')
except Exception:
return
elif command == 'clean_job':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('clean job: {}'.format(stdout.get('retmsg')) + '\n')
subp = subprocess.Popen([self.python_bin,
self.fate_flow_path,
"-f",
"component_metrics",
"-j",
self.job_id,
"-r",
"guest",
"-p",
str(self.guest_party_id[0]),
"-cpn",
'evaluation_0'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
metric, stderr = subp.communicate()
metric = json.loads(metric.decode("utf-8"))
if not metric.get('data'):
return stdout.get('retcode')
except Exception:
return
elif command == 'clean_queue':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('clean queue: {}'.format(stdout.get('retmsg')) + '\n')
if not self.query_job(queue=True):
return stdout.get('retcode')
except Exception:
return
def query_job(self, job_id=None, queue=False):
if job_id is None:
job_id = self.job_id
time.sleep(1)
try:
if not queue:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "query_job", "-j", job_id],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if not stdout.get('retcode'):
return stdout.get("data")[0].get("f_status")
else:
self.error_log('query job: {}'.format(stdout.get('retmsg')) + '\n')
else:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "query_job", "-j", job_id, "-s",
"waiting"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if not stdout.get('retcode'):
return len(stdout.get("data"))
except Exception:
return
def job_config(self, max_iter):
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "job_config", "-j", self.job_id, "-r",
"guest", "-p", str(self.guest_party_id[0]), "-o", self.output_path],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('job config: {}'.format(stdout.get('retmsg')) + '\n')
job_conf_path = stdout.get('directory') + '/runtime_conf.json'
job_conf = get_dict_from_file(job_conf_path)
if max_iter == job_conf['component_parameters']['common'][self.component_name]['max_iter']:
return stdout.get('retcode')
except Exception:
return
def query_task(self):
try:
subp = subprocess.Popen(
[self.python_bin, self.fate_flow_path, "-f", "query_task", "-j", self.job_id, "-r", "guest",
"-p", str(self.guest_party_id[0]), "-cpn", self.component_name],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('task query: {}'.format(stdout.get('retmsg')) + '\n')
status = stdout.get("data")[0].get("f_status")
if status == "success":
return stdout.get('retcode')
except Exception:
return
def component_api(self, command, max_iter=None):
component_output_path = os.path.join(self.output_path, 'job_{}_output_data'.format(self.job_id))
if command == 'component_output_data':
try:
subp = subprocess.Popen(
[self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id, "-r",
"guest", "-p", str(self.guest_party_id[0]), "-cpn", self.component_name, "-o",
component_output_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component output data: {}'.format(stdout.get('retmsg')) + '\n')
return stdout.get('retcode')
except Exception:
return
elif command == 'component_output_data_table':
try:
subp = subprocess.Popen(
[self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id, "-r",
"guest", "-p", str(self.guest_party_id[0]), "-cpn", self.component_name],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component output data table: {}'.format(stdout.get('retmsg')) + '\n')
table = {'table_name': stdout.get("data")[0].get("table_name"),
'namespace': stdout.get("data")[0].get("namespace")}
if not self.table_api('table_info', table):
return stdout.get('retcode')
except Exception:
return
elif command == 'component_output_model':
try:
subp = subprocess.Popen([self.python_bin,
self.fate_flow_path,
"-f",
command,
"-r",
"guest",
"-j",
self.job_id,
"-p",
str(self.guest_party_id[0]),
"-cpn",
self.component_name],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component output model: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get("data"):
return stdout.get('retcode')
except Exception:
return
elif command == 'component_parameters':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id,
"-r", "guest", "-p", str(self.guest_party_id[0]), "-cpn", self.component_name],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component parameters: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get('data', {}).get('ComponentParam', {}).get('max_iter', {}) == max_iter:
return stdout.get('retcode')
except Exception:
return
elif command == 'component_metrics':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id,
"-r", "guest", "-p", str(self.guest_party_id[0]), "-cpn", 'evaluation_0'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component metrics: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get("data"):
metrics_file = self.output_path + '{}_metrics.json'.format(self.job_id)
with open(metrics_file, 'w') as fp:
json.dump(stdout.get("data"), fp)
return stdout.get('retcode')
except Exception:
return
elif command == 'component_metric_all':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id,
"-r", "guest", "-p", str(self.guest_party_id[0]), "-cpn", 'evaluation_0'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component metric all: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get("data"):
metric_all_file = self.output_path + '{}_metric_all.json'.format(self.job_id)
with open(metric_all_file, 'w') as fp:
json.dump(stdout.get("data"), fp)
return stdout.get('retcode')
except Exception:
return
elif command == 'component_metric_delete':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j",
self.job_id, "-r", "guest", "-p", str(self.guest_party_id[0]), "-cpn",
'evaluation_0'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component metric delete: {}'.format(stdout.get('retmsg')) + '\n')
subp = subprocess.Popen([self.python_bin,
self.fate_flow_path,
"-f",
"component_metrics",
"-j",
self.job_id,
"-r",
"guest",
"-p",
str(self.guest_party_id[0]),
"-cpn",
'evaluation_0'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
metric, stderr = subp.communicate()
metric = json.loads(metric.decode("utf-8"))
if not metric.get('data'):
return stdout.get('retcode')
except Exception:
return
def table_api(self, command, table_name):
if command == 'table_info':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-t",
table_name['table_name'], "-n", table_name['namespace']],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('table info: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get('data')['namespace'] == table_name['namespace'] and \
stdout.get('data')['table_name'] == table_name['table_name']:
return stdout.get('retcode')
except Exception:
return
elif command == 'table_delete':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-t",
table_name['table_name'], "-n", table_name['namespace']],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('table delete: {}'.format(stdout.get('retmsg')) + '\n')
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "table_delete", "-t",
table_name['table_name'], "-n", table_name['namespace']],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
return 0
except Exception:
return
def data_upload(self, upload_path, table_index=None):
upload_file = get_dict_from_file(upload_path)
upload_file['file'] = str(self.data_base_dir.joinpath(upload_file['file']).resolve())
upload_file['drop'] = 1
upload_file['use_local_data'] = 0
if table_index is not None:
upload_file['table_name'] = f'{upload_file["file"]}_{table_index}'
upload_path = self.cache_directory + 'upload_file.json'
with open(upload_path, 'w') as fp:
json.dump(upload_file, fp)
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "upload", "-c",
upload_path, "-drop", "1"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('data upload: {}'.format(stdout.get('retmsg')) + '\n')
return self.query_status(stdout.get("jobId"))
except Exception:
return
def data_download(self, table_name, output_path):
download_config = {
"table_name": table_name['table_name'],
"namespace": table_name['namespace'],
"output_path": output_path + '{}download.csv'.format(self.job_id)
}
config_file_path = self.cache_directory + 'download_config.json'
with open(config_file_path, 'w') as fp:
json.dump(download_config, fp)
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "download", "-c", config_file_path],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('data download: {}'.format(stdout.get('retmsg')) + '\n')
return self.query_status(stdout.get("jobId"))
except Exception:
return
def data_upload_history(self, conf_file):
self.data_upload(conf_file, table_index=1)
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "upload_history", "-limit", "2"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('data upload history: {}'.format(stdout.get('retmsg')) + '\n')
if len(stdout.get('data')) == 2:
return stdout.get('retcode')
except Exception:
return
def model_api(self, command, remove_path=None, model_path=None, model_load_conf=None, servings=None):
if model_load_conf is not None:
model_load_conf["job_parameters"].update({"model_id": self.model_id,
"model_version": self.model_version})
if command == 'load':
model_load_path = self.cache_directory + 'model_load_file.json'
with open(model_load_path, 'w') as fp:
json.dump(model_load_conf, fp)
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-c", model_load_path],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('model load: {}'.format(stdout.get('retmsg')) + '\n')
return stdout.get('retcode')
except Exception:
return
elif command == 'bind':
service_id = "".join([str(i) for i in np.random.randint(9, size=8)])
model_load_conf.update({"service_id": service_id, "servings": [servings]})
model_bind_path = self.cache_directory + 'model_load_file.json'
with open(model_bind_path, 'w') as fp:
json.dump(model_load_conf, fp)
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-c", model_bind_path],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('model bind: {}'.format(stdout.get('retmsg')) + '\n')
else:
return stdout.get('retcode')
except Exception:
return
elif command == 'import':
config_data = {
"model_id": self.model_id,
"model_version": self.model_version,
"role": "guest",
"party_id": self.guest_party_id[0],
"file": model_path,
"force_update": 1,
}
config_file_path = self.cache_directory + 'model_import.json'
with open(config_file_path, 'w') as fp:
json.dump(config_data, fp)
try:
remove_path = Path(remove_path + self.model_version)
if os.path.isdir(remove_path):
shutil.rmtree(remove_path)
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-c", config_file_path],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if not stdout.get('retcode') and os.path.isdir(remove_path):
return 0
else:
self.error_log('model import: {}'.format(stdout.get('retmsg')) + '\n')
except Exception:
return
elif command == 'export':
config_data = {
"model_id": self.model_id,
"model_version": self.model_version,
"role": "guest",
"party_id": self.guest_party_id[0]
}
config_file_path = self.cache_directory + 'model_export.json'
with open(config_file_path, 'w') as fp:
json.dump(config_data, fp)
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-c", config_file_path, "-o",
self.output_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('model export: {}'.format(stdout.get('retmsg')) + '\n')
else:
export_model_path = stdout.get('file')
return stdout.get('retcode'), export_model_path
elif command in ['store', 'restore']:
config_data = {
"model_id": self.model_id,
"model_version": self.model_version,
"role": "guest",
"party_id": self.guest_party_id[0]
}
config_file_path = self.cache_directory + 'model_store.json'
with open(config_file_path, 'w') as fp:
json.dump(config_data, fp)
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-c", config_file_path],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('model {}: {}'.format(command, stdout.get('retmsg')) + '\n')
return stdout.get('retcode')
def query_status(self, job_id=None):
while True:
time.sleep(5)
status = self.query_job(job_id=job_id)
if status and status in ["waiting", "running", "success"]:
if status and status == "success":
return 0
else:
return
def set_config(self, guest_party_id, host_party_id, arbiter_party_id, path, component_name):
config = get_dict_from_file(path)
config["initiator"]["party_id"] = guest_party_id[0]
config["role"]["guest"] = guest_party_id
config["role"]["host"] = host_party_id
if "arbiter" in config["role"]:
config["role"]["arbiter"] = arbiter_party_id
self.guest_party_id = guest_party_id
self.host_party_id = host_party_id
self.arbiter_party_id = arbiter_party_id
conf_file_path = self.cache_directory + 'conf_file.json'
with open(conf_file_path, 'w') as fp:
json.dump(config, fp)
self.conf_path = conf_file_path
return config['component_parameters']['common'][component_name]['max_iter']
def judging_state(retcode):
if not retcode and retcode is not None:
return 'success'
else:
return 'failed'
def run_test_api(config_json, namespace):
output_path = './output/flow_test_data/'
os.makedirs(os.path.dirname(output_path), exist_ok=True)
fate_flow_path = config_json['data_base_dir'] / 'fateflow' / 'python' / 'fate_flow' / 'fate_flow_client.py'
if not fate_flow_path.exists():
raise FileNotFoundError(f'fate_flow not found. filepath: {fate_flow_path}')
test_api = TestModel(config_json['data_base_dir'], str(fate_flow_path), config_json['component_name'], namespace)
test_api.dsl_path = config_json['train_dsl_path']
test_api.cache_directory = config_json['cache_directory']
test_api.output_path = str(os.path.abspath(output_path)) + '/'
conf_path = config_json['train_conf_path']
guest_party_id = config_json['guest_party_id']
host_party_id = config_json['host_party_id']
arbiter_party_id = config_json['arbiter_party_id']
upload_file_path = config_json['upload_file_path']
model_file_path = config_json['model_file_path']
conf_file = get_dict_from_file(upload_file_path)
serving_connect_bool = serving_connect(config_json['serving_setting'])
remove_path = str(config_json['data_base_dir']).split("python")[
0] + '/fateflow/model_local_cache/guest#{}#arbiter-{}#guest-{}#host-{}#model/'.format(
guest_party_id[0], arbiter_party_id[0], guest_party_id[0], host_party_id[0])
max_iter = test_api.set_config(guest_party_id, host_party_id, arbiter_party_id, conf_path,
config_json['component_name'])
data = PrettyTable()
data.set_style(ORGMODE)
data.field_names = ['data api name', 'status']
data.add_row(['data upload', judging_state(test_api.data_upload(upload_file_path))])
data.add_row(['data download', judging_state(test_api.data_download(conf_file, output_path))])
data.add_row(
['data upload history', judging_state(test_api.data_upload_history(upload_file_path))])
print(data.get_string(title="data api"))
table = PrettyTable()
table.set_style(ORGMODE)
table.field_names = ['table api name', 'status']
table.add_row(['table info', judging_state(test_api.table_api('table_info', conf_file))])
table.add_row(['delete table', judging_state(test_api.table_api('table_delete', conf_file))])
print(table.get_string(title="table api"))
job = PrettyTable()
job.set_style(ORGMODE)
job.field_names = ['job api name', 'status']
job.add_row(['job stop', judging_state(test_api.job_api('stop_job'))])
job.add_row(['job submit', judging_state(test_api.submit_job(stop=False))])
job.add_row(['job query', judging_state(False if test_api.query_job() == "success" else True)])
job.add_row(['job data view', judging_state(test_api.job_api('data_view_query'))])
job.add_row(['job config', judging_state(test_api.job_config(max_iter=max_iter))])
job.add_row(['job log', judging_state(test_api.job_api('job_log_download'))])
task = PrettyTable()
task.set_style(ORGMODE)
task.field_names = ['task api name', 'status']
task.add_row(['task query', judging_state(test_api.query_task())])
print(task.get_string(title="task api"))
component = PrettyTable()
component.set_style(ORGMODE)
component.field_names = ['component api name', 'status']
component.add_row(['output data', judging_state(test_api.component_api('component_output_data'))])
component.add_row(['output table', judging_state(test_api.component_api('component_output_data_table'))])
component.add_row(['output model', judging_state(test_api.component_api('component_output_model'))])
component.add_row(
['component parameters', judging_state(test_api.component_api('component_parameters', max_iter=max_iter))])
component.add_row(['metrics', judging_state(test_api.component_api('component_metrics'))])
component.add_row(['metrics all', judging_state(test_api.component_api('component_metric_all'))])
model = PrettyTable()
model.set_style(ORGMODE)
model.field_names = ['model api name', 'status']
if not config_json.get('component_is_homo') and serving_connect_bool:
model_load_conf = get_dict_from_file(model_file_path)
model_load_conf["initiator"]["party_id"] = guest_party_id
model_load_conf["role"].update(
{"guest": [guest_party_id], "host": [host_party_id], "arbiter": [arbiter_party_id]})
model.add_row(['model load', judging_state(test_api.model_api('load', model_load_conf=model_load_conf))])
model.add_row(['model bind', judging_state(
test_api.model_api('bind', model_load_conf=model_load_conf, servings=config_json['serving_setting']))])
status, model_path = test_api.model_api('export')
model.add_row(['model export', judging_state(status)])
model.add_row(['model import', (judging_state(
test_api.model_api('import', remove_path=remove_path, model_path=model_path)))])
model.add_row(['model store', (judging_state(test_api.model_api('store')))])
model.add_row(['model restore', (judging_state(test_api.model_api('restore')))])
print(model.get_string(title="model api"))
component.add_row(['metrics delete', judging_state(test_api.component_api('component_metric_delete'))])
print(component.get_string(title="component api"))
test_api.submit_job()
test_api.submit_job()
test_api.submit_job()
job.add_row(['clean job', judging_state(test_api.job_api('clean_job'))])
job.add_row(['clean queue', judging_state(test_api.job_api('clean_queue'))])
print(job.get_string(title="job api"))
print('Please check the error content: {}'.format(test_api.error_log(None)))
| 33,971 | 49.780269 | 120 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/flow_test/flow_sdk_api.py
|
import json
import os
import shutil
import time
import numpy as np
from pathlib import Path
from flow_sdk.client import FlowClient
from prettytable import PrettyTable, ORGMODE
from fate_test.flow_test.flow_process import get_dict_from_file, serving_connect
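# SDK variant of the flow API tests: the same checks as the CLI/REST versions,
# driven through flow_sdk's FlowClient.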
class TestModel(object):
def __init__(self, data_base_dir, server_url, component_name, namespace):
self.conf_path = None
self.dsl_path = None
self.job_id = None
self.model_id = None
self.model_version = None
self.guest_party_id = None
self.host_party_id = None
self.arbiter_party_id = None
self.output_path = None
self.cache_directory = None
self.data_base_dir = data_base_dir
self.component_name = component_name
self.client = FlowClient(server_url.split(':')[0], server_url.split(':')[1].split('/')[0],
server_url.split(':')[1].split('/')[1])
self.request_api_info_path = f'./logs/{namespace}/sdk_exception.log'
os.makedirs(os.path.dirname(self.request_api_info_path), exist_ok=True)
def error_log(self, retmsg):
if retmsg is None:
return os.path.abspath(self.request_api_info_path)
with open(self.request_api_info_path, "a") as f:
f.write(retmsg)
def submit_job(self, stop=True):
try:
stdout = self.client.job.submit(config_data=get_dict_from_file(self.conf_path),
dsl_data=get_dict_from_file(self.dsl_path))
if stdout.get('retcode'):
self.error_log('job submit: {}'.format(stdout.get('retmsg')) + '\n')
self.job_id = stdout.get("jobId")
self.model_id = stdout.get("data").get("model_info").get("model_id")
self.model_version = stdout.get("data").get("model_info").get("model_version")
if stop:
return
return self.query_status()
except Exception:
return
def job_dsl_generate(self):
train_dsl = {"components": {"data_transform_0": {"module": "DataTransform", "input": {"data": {"data": []}},
"output": {"data": ["train"], "model": ["data_transform"]}}}}
train_dsl_path = self.cache_directory + 'generate_dsl_file.json'
with open(train_dsl_path, 'w') as fp:
json.dump(train_dsl, fp)
try:
stdout = self.client.job.generate_dsl(train_dsl=get_dict_from_file(train_dsl_path),
cpn=['data_transform_0'])
if stdout.get('retcode'):
self.error_log('job dsl generate: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get('data')['components']['data_transform_0']['input']['model'][
0] == 'pipeline.data_transform_0.data_transform':
return stdout.get('retcode')
except Exception:
return
def job_api(self, command):
if command == 'stop':
self.submit_job()
time.sleep(5)
try:
stdout = self.client.job.stop(job_id=self.job_id)
if stdout.get('retcode'):
self.error_log('job stop: {}'.format(stdout.get('retmsg')) + '\n')
if self.query_job() == "canceled":
return stdout.get('retcode')
except Exception:
return
elif command == 'list/job':
try:
stdout = self.client.job.list(limit=3)
if stdout.get('retcode'):
self.error_log('job list: {}'.format(stdout.get('retmsg')) + '\n')
if len(stdout.get('data', {}).get('jobs', [])) == 3:
return stdout.get('retcode')
except Exception:
return
elif command == 'view':
try:
stdout = self.client.job.view(job_id=self.job_id, role="guest")
if stdout.get('retcode'):
self.error_log('job view: {}'.format(stdout.get('retmsg')) + '\n')
if len(stdout.get("data")) == len(list(get_dict_from_file(self.dsl_path)['components'].keys())) - 1:
return stdout.get('retcode')
except Exception:
return
elif command == 'log':
log_file_dir = os.path.join(self.output_path, 'job_{}_log'.format(self.job_id))
try:
stdout = self.client.job.log(job_id=self.job_id, output_path=log_file_dir)
if stdout.get('retcode'):
self.error_log('job log: {}'.format(stdout.get('retmsg')) + '\n')
return stdout.get('retcode')
except Exception:
return
elif command == 'clean/queue':
try:
stdout = self.client.queue.clean()
if stdout.get('retcode'):
self.error_log('clean queue: {}'.format(stdout.get('retmsg')) + '\n')
if not self.query_job(queue=True):
return stdout.get('retcode')
except Exception:
return
def query_job(self, job_id=None, queue=False):
if job_id is None:
job_id = self.job_id
time.sleep(1)
try:
if not queue:
stdout = self.client.job.query(job_id=job_id)
if not stdout.get('retcode'):
return stdout.get("data")[0].get("f_status")
else:
self.error_log('query job: {}'.format(stdout.get('retmsg')) + '\n')
else:
stdout = self.client.job.query(job_id=job_id, status='waiting')
if not stdout.get('retcode'):
return len(stdout.get("data"))
except Exception:
return
def job_config(self, max_iter):
try:
stdout = self.client.job.config(job_id=self.job_id, role="guest", party_id=self.guest_party_id[0],
output_path=self.output_path)
if stdout.get('retcode'):
self.error_log('job config: {}'.format(stdout.get('retmsg')) + '\n')
job_conf_path = stdout.get('directory') + '/runtime_conf.json'
job_conf = get_dict_from_file(job_conf_path)
if max_iter == job_conf['component_parameters']['common'][self.component_name]['max_iter']:
return stdout.get('retcode')
except Exception:
return
def query_task(self):
try:
stdout = self.client.task.query(job_id=self.job_id, role="guest", party_id=self.guest_party_id[0],
component_name=self.component_name)
if stdout.get('retcode'):
self.error_log('task query: {}'.format(stdout.get('retmsg')) + '\n')
status = stdout.get("data")[0].get("f_status")
if status == "success":
return stdout.get('retcode')
except Exception:
return
def list_task(self):
try:
stdout = self.client.task.list(limit=3)
if stdout.get('retcode'):
self.error_log('list task: {}'.format(stdout.get('retmsg')) + '\n')
if len(stdout.get('data', {}).get('tasks', [])) == 3:
return stdout.get('retcode')
except Exception:
return
def component_api(self, command, max_iter=None):
component_output_path = os.path.join(self.output_path, 'job_{}_output_data'.format(self.job_id))
if command == 'output/data':
try:
stdout = self.client.component.output_data(job_id=self.job_id, role="guest",
party_id=self.guest_party_id[0],
component_name=self.component_name,
output_path=component_output_path)
if stdout.get('retcode'):
self.error_log('component output data: {}'.format(stdout.get('retmsg')) + '\n')
return stdout.get('retcode')
except Exception:
return
elif command == 'output/data/table':
try:
stdout = self.client.component.output_data_table(job_id=self.job_id, role="guest",
party_id=self.guest_party_id[0],
component_name=self.component_name)
if stdout.get('retcode'):
self.error_log('component output data table: {}'.format(stdout.get('retmsg')) + '\n')
table = {'table_name': stdout.get("data")[0].get("table_name"),
'namespace': stdout.get("data")[0].get("namespace")}
if not self.table_api('table/info', table):
return stdout.get('retcode')
except Exception:
return
elif command == 'output/model':
try:
stdout = self.client.component.output_model(job_id=self.job_id, role="guest",
party_id=self.guest_party_id[0],
component_name=self.component_name)
if stdout.get('retcode'):
self.error_log('component output model: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get("data"):
return stdout.get('retcode')
except Exception:
return
elif command == 'parameters':
try:
stdout = self.client.component.parameters(job_id=self.job_id, role="guest",
party_id=self.guest_party_id[0],
component_name=self.component_name)
if stdout.get('retcode'):
self.error_log('component parameters: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get('data', {}).get('ComponentParam', {}).get('max_iter', {}) == max_iter:
return stdout.get('retcode')
except Exception:
return
elif command == 'summary':
try:
stdout = self.client.component.get_summary(job_id=self.job_id, role="guest",
party_id=self.guest_party_id[0],
component_name=self.component_name)
if stdout.get('retcode'):
self.error_log('component summary download: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get("data"):
summary_file = self.output_path + '{}_summary.json'.format(self.job_id)
with open(summary_file, 'w') as fp:
json.dump(stdout.get("data"), fp)
return stdout.get('retcode')
except Exception:
return
elif command == 'metrics':
try:
stdout = self.client.component.metrics(job_id=self.job_id, role="guest",
party_id=self.guest_party_id[0],
component_name='evaluation_0')
if stdout.get('retcode'):
self.error_log('component metrics: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get("data"):
metrics_file = self.output_path + '{}_metrics.json'.format(self.job_id)
with open(metrics_file, 'w') as fp:
json.dump(stdout.get("data"), fp)
return stdout.get('retcode')
except Exception:
return
elif command == 'metric/all':
try:
stdout = self.client.component.metric_all(job_id=self.job_id, role="guest",
party_id=self.guest_party_id[0],
component_name='evaluation_0')
if stdout.get('retcode'):
self.error_log('component metric all: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get("data"):
metric_all_file = self.output_path + '{}_metric_all.json'.format(self.job_id)
with open(metric_all_file, 'w') as fp:
json.dump(stdout.get("data"), fp)
return stdout.get('retcode')
except Exception:
return
elif command == 'metric/delete':
try:
stdout = self.client.component.metric_delete(job_id=self.job_id, date=str(time.strftime("%Y%m%d")))
if stdout.get('retcode'):
self.error_log('component metric delete: {}'.format(stdout.get('retmsg')) + '\n')
metric = self.client.component.metrics(job_id=self.job_id, role="guest",
party_id=self.guest_party_id[0],
component_name='evaluation_0')
if not metric.get('data'):
return stdout.get('retcode')
except Exception:
return
def component_list(self):
try:
stdout = self.client.component.list(job_id=self.job_id)
if stdout.get('retcode'):
self.error_log('component list: {}'.format(stdout.get('retmsg')) + '\n')
dsl_json = get_dict_from_file(self.dsl_path)
if len(stdout.get('data')['components']) == len(list(dsl_json['components'].keys())):
return stdout.get('retcode')
except Exception:
raise
def table_api(self, command, table_name):
if command == 'table/info':
try:
stdout = self.client.table.info(table_name=table_name['table_name'], namespace=table_name['namespace'])
if stdout.get('retcode'):
self.error_log('table info: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get('data')['namespace'] == table_name['namespace'] and \
stdout.get('data')['table_name'] == table_name['table_name']:
return stdout.get('retcode')
except Exception:
return
elif command == 'table/delete':
try:
stdout = self.client.table.delete(table_name=table_name['table_name'],
namespace=table_name['namespace'])
if stdout.get('retcode'):
self.error_log('table delete: {}'.format(stdout.get('retmsg')) + '\n')
stdout = self.client.table.delete(table_name=table_name['table_name'],
namespace=table_name['namespace'])
if stdout.get('retcode'):
return 0
except Exception:
return
def data_upload(self, upload_path, table_index=None):
upload_file = get_dict_from_file(upload_path)
upload_file['file'] = str(self.data_base_dir.joinpath(upload_file['file']).resolve())
upload_file['drop'] = 1
upload_file['use_local_data'] = 0
if table_index is not None:
upload_file['table_name'] = f'{upload_file["file"]}_{table_index}'
# upload_path = self.cache_directory + 'upload_file.json'
# with open(upload_path, 'w') as fp:
# json.dump(upload_file, fp)
try:
stdout = self.client.data.upload(config_data=upload_file, drop=1)
if stdout.get('retcode'):
self.error_log('data upload: {}'.format(stdout.get('retmsg')) + '\n')
return self.query_status(stdout.get("jobId"))
except Exception:
return
def data_download(self, table_name):
download_config = {
"table_name": table_name['table_name'],
"namespace": table_name['namespace'],
"output_path": 'download.csv',
}
try:
stdout = self.client.data.download(config_data=download_config)
if stdout.get('retcode'):
self.error_log('data download: {}'.format(stdout.get('retmsg')) + '\n')
return self.query_status(stdout.get("jobId"))
except Exception:
return
def data_upload_history(self, conf_file):
self.data_upload(conf_file, table_index=1)
try:
stdout = self.client.data.upload_history(limit=2)
if stdout.get('retcode'):
self.error_log('data upload history: {}'.format(stdout.get('retmsg')) + '\n')
if len(stdout.get('data')) == 2:
return stdout.get('retcode')
except Exception:
return
def tag_api(self, command, tag_name=None, new_tag_name=None):
if command == 'tag/query':
try:
stdout = self.client.tag.query(tag_name=tag_name)
if stdout.get('retcode'):
self.error_log('tag query: {}'.format(stdout.get('retmsg')) + '\n')
if not stdout.get('retcode'):
return stdout.get('data')['tags'][0]['name']
except Exception:
return
elif command == 'tag/create':
try:
stdout = self.client.tag.create(tag_name=tag_name)
self.error_log('tag create: {}'.format(stdout.get('retmsg')) + '\n')
if self.tag_api('tag/query', tag_name=tag_name) == tag_name:
return 0
except Exception:
return
elif command == 'tag/delete':
try:
stdout = self.client.tag.delete(tag_name=tag_name)
if stdout.get('retcode'):
self.error_log('tag delete: {}'.format(stdout.get('retmsg')) + '\n')
if not self.tag_api('tag/query', tag_name=tag_name):
return 0
except Exception:
return
elif command == 'tag/update':
try:
stdout = self.client.tag.update(tag_name=tag_name, new_tag_name=new_tag_name)
self.error_log('tag update: {}'.format(stdout.get('retmsg')) + '\n')
if self.tag_api('tag/query', tag_name=new_tag_name) == new_tag_name:
return 0
except Exception:
return
elif command == 'tag/list':
try:
stdout = self.client.tag.list(limit=1)
if stdout.get('retcode'):
self.error_log('tag list: {}'.format(stdout.get('retmsg')) + '\n')
if len(stdout.get('data')['tags']) == 1:
return stdout.get('retcode')
except Exception:
return
def model_api(self, command, remove_path=None, model_path=None, tag_name=None, homo_deploy_path=None,
homo_deploy_kube_config_path=None, remove=False, model_load_conf=None, servings=None):
if model_load_conf is not None:
model_load_conf["job_parameters"].update({"model_id": self.model_id,
"model_version": self.model_version})
if command == 'model/load':
try:
stdout = self.client.model.load(config_data=model_load_conf)
if stdout.get('retcode'):
self.error_log('model load: {}'.format(stdout.get('retmsg')) + '\n')
return stdout.get('retcode')
except Exception:
return
elif command == 'model/bind':
service_id = "".join([str(i) for i in np.random.randint(9, size=8)])
model_load_conf.update({"service_id": service_id, "servings": [servings]})
try:
stdout = self.client.model.bind(config_data=model_load_conf)
if stdout.get('retcode'):
self.error_log('model bind: {}'.format(stdout.get('retmsg')) + '\n')
else:
return stdout.get('retcode')
except Exception:
return
elif command == 'model/import':
config_data = {
"model_id": self.model_id,
"model_version": self.model_version,
"role": "guest",
"party_id": self.guest_party_id[0],
"file": model_path,
"force_update": 1,
}
try:
remove_path = Path(remove_path + self.model_version)
if os.path.isdir(remove_path):
shutil.rmtree(remove_path)
stdout = self.client.model.import_model(config_data=config_data)
if not stdout.get('retcode') and os.path.isdir(remove_path):
return 0
else:
self.error_log('model import: {}'.format(stdout.get('retmsg')) + '\n')
except Exception:
return
elif command == 'model/export':
config_data = {
"model_id": self.model_id,
"model_version": self.model_version,
"role": "guest",
"party_id": self.guest_party_id[0],
"output_path": self.output_path
}
# config_file_path = self.cache_directory + 'model_export.json'
# with open(config_file_path, 'w') as fp:
# json.dump(config_data, fp)
stdout = self.client.model.export_model(config_data=config_data)
if stdout.get('retcode'):
self.error_log('model export: {}'.format(stdout.get('retmsg')) + '\n')
else:
export_model_path = stdout.get('file')
return stdout.get('retcode'), export_model_path
elif command == 'model/migrate':
config_data = {
"job_parameters": {
"federated_mode": "MULTIPLE"
},
"migrate_initiator": {
"role": "guest",
"party_id": self.guest_party_id[0]
},
"role": {
"guest": self.guest_party_id,
"arbiter": self.arbiter_party_id,
"host": self.host_party_id
},
"migrate_role": {
"guest": self.guest_party_id,
"arbiter": self.arbiter_party_id,
"host": self.host_party_id
},
"execute_party": {
"guest": self.guest_party_id,
"arbiter": self.arbiter_party_id,
"host": self.host_party_id
},
"model_id": self.model_id,
"model_version": self.model_version,
"unify_model_version": self.job_id + '_01'
}
# config_file_path = self.cache_directory + 'model_migrate.json'
# with open(config_file_path, 'w') as fp:
# json.dump(config_data, fp)
try:
stdout = self.client.model.migrate(config_data=config_data)
if stdout.get('retcode'):
self.error_log('model migrate: {}'.format(stdout.get('retmsg')) + '\n')
return stdout.get('retcode')
except Exception:
return
elif command == 'model/homo/convert':
config_data = {
"model_id": self.model_id,
"model_version": self.model_version,
"role": "guest",
"party_id": self.guest_party_id[0],
}
config_file_path = self.cache_directory + 'model_homo_convert.json'
with open(config_file_path, 'w') as fp:
json.dump(config_data, fp)
try:
stdout = self.client.model.homo_convert(conf_path=config_file_path)
if stdout.get('retcode'):
self.error_log('model homo convert: {}'.format(stdout.get('retmsg')) + '\n')
return stdout.get('retcode')
except Exception:
return
elif command == 'model/homo/deploy':
job_data = {
"model_id": self.model_id,
"model_version": self.model_version,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": self.component_name
}
config_data = get_dict_from_file(homo_deploy_path)
config_data.update(job_data)
if homo_deploy_kube_config_path:
config_data['deployment_parameters']['config_file'] = homo_deploy_kube_config_path
config_file_path = self.cache_directory + 'model_homo_deploy.json'
with open(config_file_path, 'w') as fp:
json.dump(config_data, fp)
try:
stdout = self.client.model.homo_deploy(conf_path=config_file_path)
if stdout.get('retcode'):
self.error_log('model homo deploy: {}'.format(stdout.get('retmsg')) + '\n')
return stdout.get('retcode')
except Exception:
return
elif command == 'model_tag/model':
try:
stdout = self.client.model.tag_model(job_id=self.job_id, tag_name=tag_name, remove=remove)
if stdout.get('retcode'):
self.error_log('model tag model: {}'.format(stdout.get('retmsg')) + '\n')
return self.model_api('model_tag/list', tag_name=tag_name, remove=True)
except Exception:
return
elif command == 'model_tag/list':
try:
stdout = self.client.model.tag_list(job_id=self.job_id)
if stdout.get('retcode'):
self.error_log('model tag retrieve: {}'.format(stdout.get('retmsg')) + '\n')
if remove and len(stdout.get('data').get('tags')) == 0:
return stdout.get('retcode')
if stdout.get('data').get('tags')[0].get('name') == tag_name:
return stdout.get('retcode')
except Exception:
return
elif command == 'model/deploy':
try:
stdout = self.client.model.deploy(model_id=self.model_id, model_version=self.model_version)
if stdout.get('retcode'):
self.error_log('model deploy: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get('data')['model_id'] == self.model_id and\
stdout.get('data')['model_version'] != self.model_version:
self.model_id = stdout.get('data')['model_id']
self.model_version = stdout.get('data')['model_version']
self.job_id = stdout.get('data')['model_version']
return stdout.get('retcode')
except Exception:
return
elif command == 'model/conf':
try:
stdout = self.client.model.get_predict_conf(model_id=self.model_id, model_version=self.model_version)
if stdout.get('retcode'):
self.error_log('model conf: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get('data'):
if stdout.get('data')['job_parameters']['common']['model_id'] == self.model_id \
and stdout.get('data')['job_parameters']['common']['model_version'] == \
self.model_version and stdout.get('data')['initiator']['party_id'] == \
self.guest_party_id[0] and stdout.get('data')['initiator']['role'] == 'guest':
return stdout.get('retcode')
except Exception:
return
elif command == 'model/dsl':
try:
stdout = self.client.model.get_predict_dsl(model_id=self.model_id, model_version=self.model_version)
if stdout.get('retcode'):
self.error_log('model dsl: {}'.format(stdout.get('retmsg')) + '\n')
model_dsl_cpn = list(stdout.get('data')['components'].keys())
train_dsl_cpn = list(get_dict_from_file(self.dsl_path)['components'].keys())
if len([k for k in model_dsl_cpn if k in train_dsl_cpn]) == len(train_dsl_cpn):
return stdout.get('retcode')
except Exception:
return
elif command == 'model/query':
try:
stdout = self.client.model.get_model_info(model_id=self.model_id, model_version=self.model_version,
role="guest", party_id=self.guest_party_id[0])
if stdout.get('retcode'):
self.error_log('model query: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get('data')[0].get('f_model_id') == self.model_id and \
stdout.get('data')[0].get('f_model_version') == self.model_version and \
stdout.get('data')[0].get('f_role') == "guest" and \
stdout.get('data')[0].get('f_party_id') == str(self.guest_party_id[0]):
return stdout.get('retcode')
except Exception:
return
def query_status(self, job_id=None):
while True:
time.sleep(5)
status = self.query_job(job_id=job_id)
if status and status in ["waiting", "running", "success"]:
if status == "success":
return 0
else:
return
def set_config(self, guest_party_id, host_party_id, arbiter_party_id, path, component_name):
config = get_dict_from_file(path)
config["initiator"]["party_id"] = guest_party_id[0]
config["role"]["guest"] = guest_party_id
config["role"]["host"] = host_party_id
if "arbiter" in config["role"]:
config["role"]["arbiter"] = arbiter_party_id
self.guest_party_id = guest_party_id
self.host_party_id = host_party_id
self.arbiter_party_id = arbiter_party_id
conf_file_path = self.cache_directory + 'conf_file.json'
with open(conf_file_path, 'w') as fp:
json.dump(config, fp)
self.conf_path = conf_file_path
return config['component_parameters']['common'][component_name]['max_iter']
def judging_state(retcode):
if not retcode and retcode is not None:
return 'success'
else:
return 'failed'
def run_test_api(config_json, namespace):
output_path = './output/flow_test_data/'
os.makedirs(os.path.dirname(output_path), exist_ok=True)
test_api = TestModel(config_json['data_base_dir'], config_json['server_url'].split('//')[1],
config_json['component_name'], namespace)
test_api.dsl_path = config_json['train_dsl_path']
test_api.cache_directory = config_json['cache_directory']
test_api.output_path = str(os.path.abspath(output_path)) + '/'
conf_path = config_json['train_conf_path']
guest_party_id = config_json['guest_party_id']
host_party_id = config_json['host_party_id']
arbiter_party_id = config_json['arbiter_party_id']
upload_file_path = config_json['upload_file_path']
model_file_path = config_json['model_file_path']
conf_file = get_dict_from_file(upload_file_path)
serving_connect_bool = serving_connect(config_json['serving_setting'])
remove_path = str(config_json['data_base_dir']).split("python")[
0] + '/fateflow/model_local_cache/guest#{}#arbiter-{}#guest-{}#host-{}#model/'.format(
guest_party_id[0], arbiter_party_id[0], guest_party_id[0], host_party_id[0])
max_iter = test_api.set_config(guest_party_id, host_party_id, arbiter_party_id, conf_path,
config_json['component_name'])
data = PrettyTable()
data.set_style(ORGMODE)
data.field_names = ['data api name', 'status']
data.add_row(['data upload', judging_state(test_api.data_upload(upload_file_path))])
data.add_row(['data download', judging_state(test_api.data_download(conf_file))])
data.add_row(
['data upload history', judging_state(test_api.data_upload_history(upload_file_path))])
print(data.get_string(title="data api"))
table = PrettyTable()
table.set_style(ORGMODE)
table.field_names = ['table api name', 'status']
table.add_row(['table info', judging_state(test_api.table_api('table/info', conf_file))])
table.add_row(['delete table', judging_state(test_api.table_api('table/delete', conf_file))])
print(table.get_string(title="table api"))
job = PrettyTable()
job.set_style(ORGMODE)
job.field_names = ['job api name', 'status']
job.add_row(['job stop', judging_state(test_api.job_api('stop'))])
job.add_row(['job submit', judging_state(test_api.submit_job(stop=False))])
job.add_row(['job query', judging_state(False if test_api.query_job() == "success" else True)])
job.add_row(['job view', judging_state(test_api.job_api('view'))])
job.add_row(['job list', judging_state(test_api.job_api('list/job'))])
job.add_row(['job config', judging_state(test_api.job_config(max_iter=max_iter))])
job.add_row(['job log', judging_state(test_api.job_api('log'))])
job.add_row(['job dsl generate', judging_state(test_api.job_dsl_generate())])
print(job.get_string(title="job api"))
task = PrettyTable()
task.set_style(ORGMODE)
task.field_names = ['task api name', 'status']
task.add_row(['task list', judging_state(test_api.list_task())])
task.add_row(['task query', judging_state(test_api.query_task())])
print(task.get_string(title="task api"))
tag = PrettyTable()
tag.set_style(ORGMODE)
tag.field_names = ['tag api name', 'status']
tag.add_row(['create tag', judging_state(test_api.tag_api('tag/create', 'create_job_tag'))])
tag.add_row(['update tag', judging_state(test_api.tag_api('tag/update', 'create_job_tag', 'update_job_tag'))])
tag.add_row(['list tag', judging_state(test_api.tag_api('tag/list'))])
tag.add_row(
['query tag', judging_state(not test_api.tag_api('tag/query', 'update_job_tag') == 'update_job_tag')])
tag.add_row(['delete tag', judging_state(test_api.tag_api('tag/delete', 'update_job_tag'))])
print(tag.get_string(title="tag api"))
component = PrettyTable()
component.set_style(ORGMODE)
component.field_names = ['component api name', 'status']
component.add_row(['output data', judging_state(test_api.component_api('output/data'))])
component.add_row(['output table', judging_state(test_api.component_api('output/data/table'))])
component.add_row(['output model', judging_state(test_api.component_api('output/model'))])
component.add_row(['component parameters', judging_state(test_api.component_api('parameters', max_iter=max_iter))])
component.add_row(['component summary', judging_state(test_api.component_api('summary'))])
component.add_row(['component list', judging_state(test_api.component_list())])
component.add_row(['metrics', judging_state(test_api.component_api('metrics'))])
component.add_row(['metrics all', judging_state(test_api.component_api('metric/all'))])
model = PrettyTable()
model.set_style(ORGMODE)
model.field_names = ['model api name', 'status']
if config_json.get('component_is_homo'):
homo_deploy_path = config_json.get('homo_deploy_path')
homo_deploy_kube_config_path = config_json.get('homo_deploy_kube_config_path')
model.add_row(['model homo convert', judging_state(test_api.model_api('model/homo/convert'))])
model.add_row(['model homo deploy',
judging_state(test_api.model_api('model/homo/deploy',
homo_deploy_path=homo_deploy_path,
homo_deploy_kube_config_path=homo_deploy_kube_config_path))])
if not config_json.get('component_is_homo') and serving_connect_bool:
model_load_conf = get_dict_from_file(model_file_path)
model_load_conf["initiator"]["party_id"] = guest_party_id
model_load_conf["role"].update(
{"guest": [guest_party_id], "host": [host_party_id], "arbiter": [arbiter_party_id]})
model.add_row(['model load', judging_state(test_api.model_api('model/load', model_load_conf=model_load_conf))])
model.add_row(['model bind', judging_state(test_api.model_api('model/bind', model_load_conf=model_load_conf,
servings=config_json['serving_setting']))])
status, model_path = test_api.model_api('model/export')
model.add_row(['model export', judging_state(status)])
model.add_row(['model import', (judging_state(
test_api.model_api('model/import', remove_path=remove_path, model_path=model_path)))])
model.add_row(['tag model', judging_state(test_api.model_api('model_tag/model', tag_name='model_tag_create'))])
model.add_row(['tag list', judging_state(test_api.model_api('model_tag/list', tag_name='model_tag_create'))])
model.add_row(
['tag remove', judging_state(test_api.model_api('model_tag/model', tag_name='model_tag_create', remove=True))])
if serving_connect_bool:
model.add_row(
['model migrate', judging_state(test_api.model_api('model/migrate'))])
model.add_row(['model query', judging_state(test_api.model_api('model/query'))])
if not config_json.get('component_is_homo') and serving_connect_bool:
model.add_row(['model deploy', judging_state(test_api.model_api('model/deploy'))])
model.add_row(['model conf', judging_state(test_api.model_api('model/conf'))])
model.add_row(['model dsl', judging_state(test_api.model_api('model/dsl'))])
print(model.get_string(title="model api"))
component.add_row(['metrics delete', judging_state(test_api.component_api('metric/delete'))])
print(component.get_string(title="component api"))
queue = PrettyTable()
queue.set_style(ORGMODE)
queue.field_names = ['api name', 'status']
test_api.submit_job()
test_api.submit_job()
test_api.submit_job()
queue.add_row(['clean/queue', judging_state(test_api.job_api('clean/queue'))])
print(queue.get_string(title="queue job"))
print('Please check the error content: {}'.format(test_api.error_log(None)))
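# Hedged usage sketch (not part of the original module): run_test_api expects a parsed
# config_json containing the keys read above; the concrete values below are illustrative
# assumptions normally supplied by fate_test's testsuite configuration.
#
#   example_config = {
#       "data_base_dir": "/data/projects/fate",
#       "server_url": "http://127.0.0.1:9380/v1",
#       "component_name": "hetero_lr_0",
#       "train_dsl_path": "test_job_dsl.json",
#       "train_conf_path": "test_job_conf.json",
#       "cache_directory": "./cache/",
#       "upload_file_path": "upload_conf.json",
#       "model_file_path": "model_load_conf.json",
#       "guest_party_id": [9999],
#       "host_party_id": [10000],
#       "arbiter_party_id": [10000],
#       "serving_setting": "127.0.0.1:8059",
#   }
#   run_test_api(example_config, namespace="flow_test")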
| 39,201 | 47.880299 | 119 |
py
|
FATE
|
FATE-master/python/fate_test/fate_test/flow_test/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/fate_client/setup.py
|
# -*- coding: utf-8 -*-
from setuptools import setup
packages = [
"flow_client",
"flow_client.flow_cli",
"flow_client.flow_cli.commands",
"flow_client.flow_cli.utils",
"flow_sdk",
"flow_sdk.client",
"flow_sdk.client.api",
"pipeline",
"pipeline.backend",
"pipeline.component",
"pipeline.component.nn",
"pipeline.component.nn.backend",
"pipeline.component.nn.backend.torch",
"pipeline.component.nn.models",
"pipeline.demo",
"pipeline.interface",
"pipeline.param",
"pipeline.parser",
"pipeline.runtime",
"pipeline.test",
"pipeline.utils",
"pipeline.utils.invoker",
]
package_data = {"": ["*"]}
install_requires = [
"click>=7.1.2,<8.0.0",
"loguru>=0.6.0",
"poetry>=0.12",
"pandas>=1.1.5",
"requests>=2.24.0,<3.0.0",
"requests_toolbelt>=0.9.1,<0.10.0",
"ruamel.yaml>=0.16.10,<0.17.0",
"setuptools>=65.5.1",
]
entry_points = {
"console_scripts": [
"flow = flow_client.flow:flow_cli",
"pipeline = pipeline.pipeline_cli:cli",
]
}
setup_kwargs = {
"name": "fate-client",
"version": "1.11.2",
"description": "Clients for FATE, including flow_client and pipeline",
"long_description": "FATE Client\n===========\n\nTools for interacting with FATE.\n\nquick start\n-----------\n\n1. (optional) create virtual env\n\n .. code-block:: bash\n\n python -m venv venv\n source venv/bin/activate\n\n\n2. install FATE Client\n\n .. code-block:: bash\n\n pip install fate-client\n\n\nPipeline\n========\n\nA high-level python API that allows user to design, start,\nand query FATE jobs in a sequential manner. For more information,\nplease refer to this `guide <./pipeline/README.rst>`__\n\nInitial Configuration\n---------------------\n\n1. Configure server information\n\n .. code-block:: bash\n\n # configure values in pipeline/config.yaml\n # use real ip address to configure pipeline\n pipeline init --ip 127.0.0.1 --port 9380 --log-directory ./logs\n\n\nFATE Flow Command Line Interface (CLI) v2\n=========================================\n\nA command line interface providing series of commands for user to design, start,\nand query FATE jobs. For more information, please refer to this `guide <./flow_client/README.rst>`__\n\nInitial Configuration\n---------------------\n\n1. Configure server information\n\n .. code-block:: bash\n\n # configure values in conf/service_conf.yaml\n flow init -c /data/projects/fate/conf/service_conf.yaml\n # use real ip address to initialize cli\n flow init --ip 127.0.0.1 --port 9380\n\n",
"author": "FederatedAI",
"author_email": "[email protected]",
"maintainer": None,
"maintainer_email": None,
"url": "https://fate.fedai.org/",
"packages": packages,
"package_data": package_data,
"install_requires": install_requires,
"entry_points": entry_points,
"python_requires": ">=3.6,<4.0",
}
setup(**setup_kwargs)
| 2,995 | 43.058824 | 1,418 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/constant.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from enum import IntEnum
"""
class WorkMode(IntEnum):
STANDALONE = 0
CLUSTER = 1
class Backend(IntEnum):
EGGROLL = 0
SPARK_RABBITMQ = 1
SPARK_PULSAR = 2
def is_eggroll(self):
return self.value == self.EGGROLL
def is_spark_rabbitmq(self):
return self.value == self.SPARK_RABBITMQ
def is_spark_pulsar(self):
return self.value == self.SPARK_PULSAR
"""
class StoreEngine(IntEnum):
EGGROLL = 0
HDFS = 1
def is_hdfs(self):
return self.value == self.HDFS
def is_eggroll(self):
return self.value == self.EGGROLL
class RetCode(IntEnum):
SUCCESS = 0
EXCEPTION_ERROR = 100
PARAMETER_ERROR = 101
DATA_ERROR = 102
OPERATING_ERROR = 103
FEDERATED_ERROR = 104
CONNECTION_ERROR = 105
SERVER_ERROR = 500
class SchedulingStatusCode(object):
SUCCESS = 0
NO_RESOURCE = 1
PASS = 1
NO_NEXT = 2
HAVE_NEXT = 3
FAILED = 4
class FederatedSchedulingStatusCode(object):
SUCCESS = 0
PARTIAL = 1
FAILED = 2
class BaseStatus(object):
@classmethod
def status_list(cls):
return [cls.__dict__[k] for k in cls.__dict__.keys() if not callable(getattr(cls, k))
and not k.startswith("__")]
@classmethod
def contains(cls, status):
return status in cls.status_list()
class StatusSet(BaseStatus):
WAITING = 'waiting'
START = 'start'
RUNNING = "running"
CANCELED = "canceled"
TIMEOUT = "timeout"
FAILED = "failed"
SUCCESS = "success"
@classmethod
def get_level(cls, status):
return dict(zip(cls.status_list(), range(len(cls.status_list())))).get(status, None)
class JobStatus(BaseStatus):
WAITING = StatusSet.WAITING
RUNNING = StatusSet.RUNNING
CANCELED = StatusSet.CANCELED
TIMEOUT = StatusSet.TIMEOUT
FAILED = StatusSet.FAILED
SUCCESS = StatusSet.SUCCESS
class TaskSetStatus(BaseStatus):
WAITING = StatusSet.WAITING
RUNNING = StatusSet.RUNNING
CANCELED = StatusSet.CANCELED
TIMEOUT = StatusSet.TIMEOUT
FAILED = StatusSet.FAILED
SUCCESS = StatusSet.SUCCESS
class TaskStatus(BaseStatus):
WAITING = StatusSet.WAITING
RUNNING = StatusSet.RUNNING
CANCELED = StatusSet.CANCELED
TIMEOUT = StatusSet.TIMEOUT
FAILED = StatusSet.FAILED
SUCCESS = StatusSet.SUCCESS
class OngoingStatus(BaseStatus):
WAITING = StatusSet.WAITING
RUNNING = StatusSet.RUNNING
class InterruptStatus(BaseStatus):
CANCELED = StatusSet.CANCELED
TIMEOUT = StatusSet.TIMEOUT
FAILED = StatusSet.FAILED
class EndStatus(BaseStatus):
CANCELED = StatusSet.CANCELED
TIMEOUT = StatusSet.TIMEOUT
FAILED = StatusSet.FAILED
SUCCESS = StatusSet.SUCCESS
@staticmethod
def is_end_status(status):
return status in EndStatus.status_list()
class ModelStorage(object):
REDIS = "redis"
MYSQL = "mysql"
class ModelOperation(object):
STORE = "store"
RESTORE = "restore"
EXPORT = "export"
IMPORT = "import"
LOAD = "load"
BIND = "bind"
class ProcessRole(object):
SERVER = "server"
EXECUTOR = "executor"
class TagOperation(object):
CREATE = "create"
RETRIEVE = "retrieve"
UPDATE = "update"
DESTROY = "destroy"
LIST = "list"
class ProviderType(object):
FATE = "fate"
FATE_FLOW = "fate_flow"
FATE_SQL = "fate_sql"
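# Hedged usage sketch (illustrative, not part of the original module): the BaseStatus
# helpers above collect the non-callable, non-dunder class attributes, so status checks
# read as
#
#   JobStatus.contains("running")       # True
#   EndStatus.contains("success")       # True
#   InterruptStatus.status_list()       # ['canceled', 'timeout', 'failed']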
| 4,017 | 21.076923 | 93 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/pipeline_cli.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
from pathlib import Path
from ruamel import yaml
from flow_sdk.client import FlowClient
default_config = Path(__file__).parent.joinpath("config.yaml").resolve()
@click.group("Pipeline Config Tool")
def cli():
pass
@click.command(name="init")
@click.option("-c", "--pipeline-conf-path", "config_path", type=click.Path(exists=True),
help="Path to pipeline configuration file.")
@click.option("-d", "--log-directory", type=click.Path(),
help="Path to pipeline logs directory.")
@click.option("--ip", type=click.STRING, help="Fate Flow server ip address.")
@click.option("--port", type=click.INT, help="Fate Flow server port.")
@click.option("--app-key", type=click.STRING, help="app key for request to Fate Flow server")
@click.option("--secret-key", type=click.STRING, help="secret key for request to Fate Flow server")
@click.option("-r", "--system-user", type=click.STRING, help="system user role")
def _init(**kwargs):
"""
\b
- DESCRIPTION:
Pipeline Config Command. User can choose to provide path to conf file,
or provide ip address and http port of a valid fate flow server. Optionally,
pipeline log directory can be set to arbitrary location. Default log directory is
pipeline/logs. Notice that, if both conf file and specifications are provided,
settings in conf file are ignored.
\b
- USAGE:
pipeline init -c config.yaml
pipeline init --ip 10.1.2.3 --port 9380 --log-directory ./logs --system-user guest
"""
config_path = kwargs.get("config_path")
ip = kwargs.get("ip")
port = kwargs.get("port")
log_directory = kwargs.get("log_directory")
system_user = kwargs.get("system_user")
app_key = kwargs.get("app_key")
secret_key = kwargs.get("secret_key")
if config_path is None and (ip is None or port is None):
print(
"\nPipeline configuration failed. \nPlease provides configuration file path "
"or server http ip address & port information."
)
return
if config_path is None:
config_path = default_config
with Path(config_path).open("r") as fin:
config = yaml.safe_load(fin)
if ip:
config["ip"] = ip
if port:
config["port"] = port
if log_directory:
config["log_directory"] = Path(log_directory).resolve().__str__()
if app_key:
config["app_key"] = app_key
if secret_key:
config["secret_key"] = secret_key
if system_user:
system_user = system_user.lower()
if system_user not in ["guest", "host", "arbiter"]:
raise ValueError(f"system_user {system_user} is not valid. Must be one of (guest, host, arbiter)")
config["system_setting"] = {"role": system_user}
with default_config.open("w") as fout:
yaml.dump(config, fout, Dumper=yaml.RoundTripDumper)
print("Pipeline configuration succeeded.")
@click.group("config", help="pipeline config tool")
def config_group():
"""
pipeline config
"""
pass
@config_group.command(name="show")
def _show():
"""
\b
- DESCRIPTION:
Show pipeline config details for Flow server.
\b
- USAGE:
pipeline config show
"""
with Path(default_config).open("r") as fin:
config = yaml.safe_load(fin)
click.echo(f"\nPipeline Config: {yaml.dump(config)}")
@config_group.command(name="check")
def _check():
"""
\b
- DESCRIPTION:
Check for Flow server status and Flow version.
\b
- USAGE:
pipeline config check
"""
from pipeline.backend import config as conf
client = FlowClient(ip=conf.PipelineConfig.IP, port=conf.PipelineConfig.PORT, version=conf.SERVER_VERSION)
version = client.remote_version.fate_flow()
if version is None:
click.echo(f"Flow server not responsive. Please check flow server ip and port setting.")
else:
click.echo(f"Flow server status normal, Flow version: {version}")
cli.add_command(_init)
cli.add_command(config_group)
if __name__ == '__main__':
cli()
| 4,829 | 30.986755 | 110 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/__init__.py
|
try:
from pipeline.component.nn.backend.torch.import_hook import fate_torch_hook
from pipeline.component.nn.backend import torch as fate_torch
except ImportError:
fate_torch_hook, fate_torch = None, None
except ValueError:
fate_torch_hook, fate_torch = None, None
__all__ = ['fate_torch_hook', 'fate_torch']
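# Hedged note (illustrative): when the torch backend imports successfully, fate_torch_hook
# is typically applied to the imported torch module before declaring nn components in a
# pipeline script, e.g.
#
#   import torch as t
#   from pipeline import fate_torch_hook
#   fate_torch_hook(t)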
| 325 | 31.6 | 79 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/runtime/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/fate_client/pipeline/runtime/entity.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from pipeline.utils.tools import extract_explicit_parameter
class JobParameters(object):
@extract_explicit_parameter
def __init__(
self,
job_type="train",
computing_engine=None,
federation_engine=None,
storage_engine=None,
engines_address=None,
federated_mode=None,
federation_info=None,
task_parallelism=None,
federated_status_collect_type=None,
federated_data_exchange_type=None,
model_id=None,
model_version=None,
dsl_version=None,
timeout=None,
eggroll_run=None,
spark_run=None,
adaptation_parameters=None,
**kwargs):
explicit_parameters = kwargs["explict_parameters"]
for param_key, param_value in explicit_parameters.items():
if param_key == "backend":
print("Please don't use parameter 'backend' in FATE version >= 1.7.")
elif param_key == "work_mode":
print("Please don't use parameter 'work_mode' in FATE version >= 1.7.")
else:
setattr(self, param_key, param_value)
self.__party_instance = {}
self._job_param = {}
def get_party_instance(self, role="guest", party_id=None):
if role not in ["guest", "host", "arbiter"]:
raise ValueError("Role should be one of guest/host/arbiter")
if party_id is not None:
if isinstance(party_id, list):
for _id in party_id:
if not isinstance(_id, int) or _id <= 0:
raise ValueError("party id should be positive integer")
elif not isinstance(party_id, int) or party_id <= 0:
raise ValueError("party id should be positive integer")
if role not in self.__party_instance:
self.__party_instance[role] = {}
self.__party_instance[role]["party"] = {}
party_key = party_id
if isinstance(party_id, list):
party_key = "|".join(map(str, party_id))
if party_key not in self.__party_instance[role]["party"]:
self.__party_instance[role]["party"][party_key] = None
if not self.__party_instance[role]["party"][party_key]:
party_instance = copy.deepcopy(self)
self.__party_instance[role]["party"][party_key] = party_instance
return self.__party_instance[role]["party"][party_key]
def job_param(self, **kwargs):
new_kwargs = copy.deepcopy(kwargs)
for attr in new_kwargs:
setattr(self, attr, new_kwargs[attr])
self._job_param[attr] = new_kwargs[attr]
def get_job_param(self):
return self._job_param
def get_common_param_conf(self):
common_param_conf = {}
for attr in self.__dict__:
if attr.startswith("_"):
continue
common_param_conf[attr] = getattr(self, attr)
return common_param_conf
def get_role_param_conf(self, roles=None):
role_param_conf = {}
if not self.__party_instance:
return role_param_conf
for role in self.__party_instance:
role_param_conf[role] = {}
if None in self.__party_instance[role]["party"]:
role_all_party_conf = self.__party_instance[role]["party"][None].get_job_param()
if "all" not in role_param_conf:
role_param_conf[role]["all"] = {}
role_param_conf[role]["all"] = role_all_party_conf
valid_partyids = roles.get(role)
for party_id in self.__party_instance[role]["party"]:
if not party_id:
continue
if isinstance(party_id, int):
party_key = str(valid_partyids.index(party_id))
else:
party_list = list(map(int, party_id.split("|", -1)))
party_key = "|".join(map(str, [valid_partyids.index(party) for party in party_list]))
party_inst = self.__party_instance[role]["party"][party_id]
if party_key not in role_param_conf:
role_param_conf[role][party_key] = {}
role_param_conf[role][party_key] = party_inst.get_job_param()
return role_param_conf
def get_config(self, *args, **kwargs):
"""need to implement"""
roles = kwargs["roles"]
common_param_conf = self.get_common_param_conf()
role_param_conf = self.get_role_param_conf(roles)
conf = {}
if common_param_conf:
conf['common'] = common_param_conf
if role_param_conf:
conf["role"] = role_param_conf
return conf
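# Hedged usage sketch (illustrative values, not part of the original module): JobParameters
# compiles into a "common" section plus per-party overrides registered through
# get_party_instance()/job_param().
#
#   job_parameters = JobParameters(computing_engine="EGGROLL", task_parallelism=2)
#   guest_params = job_parameters.get_party_instance(role="guest", party_id=9999)
#   guest_params.job_param(timeout=3600)
#   conf = job_parameters.get_config(roles={"guest": [9999], "host": [10000]})
#   # conf -> {"common": {...}, "role": {"guest": {"0": {"timeout": 3600}}}}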
| 5,453 | 34.415584 | 105 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/backend/task_info.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pipeline.utils.logger import LOGGER
class TaskInfo(object):
def __init__(self, jobid, component, job_client, role='guest', party_id=9999):
self._jobid = jobid
self._component = component
self._job_client = job_client
self._party_id = party_id
self._role = role
@LOGGER.catch(onerror=lambda _: sys.exit(1))
def get_output_data(self, limits=None, to_pandas=True):
'''
gets downloaded data of arbitrary component
Parameters
----------
limits: int, None, default None. Maximum number of lines returned, including header. If None, return all lines.
to_pandas: bool, default True.
Returns
-------
single output example: pandas.DataFrame
multiple output example:
{
train_data: train_data_df,
validate_data: validate_data_df,
test_data: test_data_df
}
'''
return self._job_client.get_output_data(self._jobid, self._component.name, self._role,
self._party_id, limits, to_pandas=to_pandas)
@LOGGER.catch(onerror=lambda _: sys.exit(1))
def get_model_param(self):
'''
get fitted model parameters
Returns
-------
dict
'''
return self._job_client.get_model_param(self._jobid, self._component.name, self._role, self._party_id)
@LOGGER.catch(onerror=lambda _: sys.exit(1))
def get_output_data_table(self):
'''
get output data table information, including table name and namespace, as given by flow client
Returns
-------
dict
'''
return self._job_client.get_output_data_table(self._jobid, self._component.name, self._role, self._party_id)
@LOGGER.catch(onerror=lambda _: sys.exit(1))
def get_summary(self):
'''
get module summary of arbitrary component
Returns
-------
dict
'''
return self._job_client.get_summary(self._jobid, self._component.name, self._role, self._party_id)
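# Hedged usage sketch (illustrative component name): a TaskInfo is normally obtained from
# a fitted PipeLine via get_component() rather than constructed directly.
#
#   task_info = pipeline.get_component("hetero_lr_0")
#   output_df = task_info.get_output_data(limits=100)
#   model_param = task_info.get_model_param()
#   summary = task_info.get_summary()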
| 2,750 | 32.962963 | 119 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/backend/config.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pathlib import Path
from ruamel import yaml
from pipeline.constant import JobStatus
__all__ = ["JobStatus", "VERSION", "SERVER_VERSION", "TIME_QUERY_FREQS", "Role", "StatusCode",
"LogPath", "LogFormat", "IODataType", "PipelineConfig"]
VERSION = 2
SERVER_VERSION = "v1"
TIME_QUERY_FREQS = 1
MAX_RETRY = 3
def get_default_config() -> dict:
with (Path(__file__).parent.parent / "config.yaml").open(encoding="utf-8") as f:
return yaml.safe_load(f)
class Role(object):
LOCAL = "local"
GUEST = "guest"
HOST = "host"
ARBITER = "arbiter"
@classmethod
def support_roles(cls):
roles = set()
for role_key, role in cls.__dict__.items():
if role_key.startswith("__") and isinstance(role_key, str):
continue
roles.add(role)
return roles
class StatusCode(object):
SUCCESS = 0
FAIL = 1
CANCELED = 2
class IODataType:
SINGLE = "data"
TRAIN = "train_data"
VALIDATE = "validate_data"
TEST = "test_data"
class PipelineConfigMeta(type):
def __getattr__(cls, name):
if name not in cls._keys:
raise AttributeError(f"type object '{cls.__name__}' has no attribute '{name}'")
if cls._conf is None:
cls._conf = get_default_config()
value = cls._conf.get(name.lower())
if value is not None:
return value
if name in {"IP", "PORT"}:
raise ValueError(
f"{name} not configured. "
"Please use command line tool `pipeline init` to set it."
)
return cls._defaults.get(name)
class PipelineConfig(metaclass=PipelineConfigMeta):
_conf = None
_keys = {"IP", "PORT", "APP_KEY", "SECRET_KEY", "CONSOLE_DISPLAY_LOG", "SYSTEM_SETTING"}
_defaults = {
"CONSOLE_DISPLAY_LOG": True,
"SYSTEM_SETTING": {"role": None},
}
class LogPath(object):
@classmethod
def log_directory(cls):
conf = get_default_config()
# log_directory = os.environ.get("FATE_PIPELINE_LOG", "")
log_directory = conf.get("log_directory")
if log_directory:
log_directory = Path(log_directory).resolve()
else:
log_directory = Path(__file__).parent.parent.joinpath("logs")
try:
log_directory.mkdir(parents=True, exist_ok=True)
except Exception as e:
raise RuntimeError(f"can't create log directory for pipeline: {log_directory}") from e
if not Path(log_directory).resolve().is_dir():
raise NotADirectoryError(f"provided log directory {log_directory} is not a directory.")
return log_directory
DEBUG = 'DEBUG.log'
INFO = 'INFO.log'
ERROR = 'ERROR.log'
class LogFormat(object):
SIMPLE = '<green>[{time:HH:mm:ss}]</green><level>{message}</level>'
NORMAL = '<green>{time:YYYY-MM-DD HH:mm:ss}</green> | ' \
'<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>'
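# Hedged sketch of the pipeline config.yaml consumed by get_default_config above
# (keys inferred from PipelineConfig and LogPath; the values are illustrative assumptions):
#
#   ip: 127.0.0.1
#   port: 9380
#   log_directory: ""
#   app_key: ""
#   secret_key: ""
#   console_display_log: true
#   system_setting:
#     role: guest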
| 3,636 | 27.193798 | 99 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/backend/_operation.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# from pipeline.backend.config import WorkMode
from pipeline.utils.logger import LOGGER
class OnlineCommand(object):
def __init__(self, pipeline_obj):
self.pipeline_obj = pipeline_obj
"""
def _feed_online_conf(self):
conf = {"initiator": self.pipeline_obj._get_initiator_conf(),
"role": self.pipeline_obj._roles}
predict_model_info = self.pipeline_obj.get_predict_model_info()
train_work_mode = self.pipeline_obj.get_train_conf().get("job_parameters").get("common").get("work_mode")
if train_work_mode != WorkMode.CLUSTER:
raise ValueError(f"to use FATE serving online inference service, work mode must be CLUSTER.")
conf["job_parameters"] = {"model_id": predict_model_info.model_id,
"model_version": predict_model_info.model_version,
"work_mode": WorkMode.CLUSTER}
return conf
"""
def _feed_online_conf(self):
conf = {"initiator": self.pipeline_obj._get_initiator_conf(),
"role": self.pipeline_obj._roles}
predict_model_info = self.pipeline_obj.get_predict_model_info()
conf["job_parameters"] = {"model_id": predict_model_info.model_id,
"model_version": predict_model_info.model_version}
return conf
@LOGGER.catch(reraise=True)
def load(self, file_path=None):
if not self.pipeline_obj.is_deploy():
raise ValueError(f"to load model for online inference, must deploy components first.")
file_path = file_path if file_path else ""
load_conf = self._feed_online_conf()
load_conf["job_parameters"]["file_path"] = file_path
self.pipeline_obj._job_invoker.load_model(load_conf)
self.pipeline_obj._load = True
@LOGGER.catch(reraise=True)
def bind(self, service_id, *servings):
if not self.pipeline_obj.is_deploy() or not self.pipeline_obj.is_load():
raise ValueError(f"to bind model to online service, must deploy and load model first.")
bind_conf = self._feed_online_conf()
bind_conf["service_id"] = service_id
bind_conf["servings"] = list(servings)
self.pipeline_obj._job_invoker.bind_model(bind_conf)
class ModelConvert(object):
def __init__(self, pipeline_obj):
self.pipeline_obj = pipeline_obj
def _feed_homo_conf(self, framework_name):
model_info = self.pipeline_obj.get_model_info()
conf = {"role": self.pipeline_obj._initiator.role,
"party_id": self.pipeline_obj._initiator.party_id,
"model_id": model_info.model_id,
"model_version": model_info.model_version
}
if framework_name:
conf["framework_name"] = framework_name
return conf
@LOGGER.catch(reraise=True)
def convert(self, framework_name=None):
if self.pipeline_obj._train_dsl is None:
raise ValueError("Before converting homo model, training should be finished!!!")
conf = self._feed_homo_conf(framework_name)
res_dict = self.pipeline_obj._job_invoker.convert_homo_model(conf)
return res_dict
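# Hedged usage sketch (illustrative service id and serving address): these helpers are
# exposed on a fitted PipeLine instance as `pipeline.online` and `pipeline.model_convert`
# (see pipeline.py below).
#
#   pipeline.online.load()                                   # requires a deployed pipeline
#   pipeline.online.bind("my_service_id", "127.0.0.1:8059")
#   result = pipeline.model_convert.convert()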
| 3,836 | 41.633333 | 113 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/backend/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/backend/pipeline.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import getpass
import json
import pickle
import time
from types import SimpleNamespace
from pipeline.backend.config import Role
from pipeline.backend.config import StatusCode
from pipeline.backend.config import VERSION
from pipeline.backend.config import PipelineConfig
from pipeline.backend._operation import OnlineCommand, ModelConvert
from pipeline.backend.task_info import TaskInfo
from pipeline.component.component_base import Component
from pipeline.component.reader import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.interface import Cache
from pipeline.utils import tools
from pipeline.utils.invoker.job_submitter import JobInvoker
from pipeline.utils.logger import LOGGER
from pipeline.runtime.entity import JobParameters
class PipeLine(object):
def __init__(self):
self._create_time = time.asctime(time.localtime(time.time()))
self._initiator = None
self._roles = {}
self._components = {}
self._components_input = {}
self._train_dsl = {}
self._predict_dsl = {}
self._train_conf = {}
self._predict_conf = {}
self._upload_conf = []
self._cur_state = None
self._job_invoker = JobInvoker()
self._train_job_id = None
self._predict_job_id = None
self._fit_status = None
self._train_board_url = None
self._model_info = None
self._predict_model_info = None
self._train_components = {}
self._stage = "fit"
self._data_to_feed_in_prediction = None
self._predict_pipeline = []
self._deploy = False
self._system_role = PipelineConfig.SYSTEM_SETTING.get("role")
self.online = OnlineCommand(self)
self._load = False
self.model_convert = ModelConvert(self)
self._global_job_provider = None
@LOGGER.catch(reraise=True)
def set_initiator(self, role, party_id):
self._initiator = SimpleNamespace(role=role, party_id=party_id)
# for predict pipeline
if self._predict_pipeline:
predict_pipeline = self._predict_pipeline[0]["pipeline"]
predict_pipeline._initiator = SimpleNamespace(role=role, party_id=party_id)
return self
def get_component_list(self):
return copy.copy(list(self._components.keys()))
def restore_roles(self, initiator, roles):
self._initiator = initiator
self._roles = roles
@LOGGER.catch(reraise=True)
def get_predict_meta(self):
if self._fit_status != StatusCode.SUCCESS:
raise ValueError("To get predict meta, please fit successfully")
return {"predict_dsl": self._predict_dsl,
"train_conf": self._train_conf,
"initiator": self._initiator,
"roles": self._roles,
"model_info": self._model_info,
"components": self._components,
"stage": self._stage
}
def get_predict_model_info(self):
return copy.deepcopy(self._predict_model_info)
def get_model_info(self):
return copy.deepcopy(self._model_info)
def get_train_dsl(self):
return copy.deepcopy(self._train_dsl)
def get_train_conf(self):
return copy.deepcopy(self._train_conf)
def get_predict_dsl(self):
return copy.deepcopy(self._predict_dsl)
def get_predict_conf(self):
return copy.deepcopy(self._predict_conf)
def get_upload_conf(self):
return copy.deepcopy(self._upload_conf)
def _get_initiator_conf(self):
if self._initiator is None:
raise ValueError("Please set initiator of PipeLine")
initiator_conf = {"role": self._initiator.role,
"party_id": self._initiator.party_id}
return initiator_conf
def set_global_job_provider(self, provider):
self._global_job_provider = provider
return self
@LOGGER.catch(reraise=True)
def set_roles(self, guest=None, host=None, arbiter=None, **kwargs):
local_parameters = locals()
support_roles = Role.support_roles()
for role, party_id in local_parameters.items():
if role == "self":
continue
if not local_parameters.get(role):
continue
if role not in support_roles:
raise ValueError("Current role not support {}, support role list {}".format(role, support_roles))
party_id = local_parameters.get(role)
self._roles[role] = []
if isinstance(party_id, int):
self._roles[role].append(party_id)
elif isinstance(party_id, list):
self._roles[role].extend(party_id)
else:
raise ValueError("role: {}'s party_id should be an integer or a list of integer".format(role))
# update role config for compiled pipeline
if self._train_conf:
if role in self._train_conf["role"]:
self._train_conf["role"][role] = self._roles[role]
if self._predict_pipeline:
predict_pipeline = self._predict_pipeline[0]["pipeline"]
predict_pipeline._roles = self._roles
return self
def _get_role_conf(self):
return self._roles
def _get_party_index(self, role, party_id):
if role not in self._roles:
raise ValueError("role {} does not setting".format(role))
if party_id not in self._roles[role]:
raise ValueError("role {} does not init setting with the party_id {}".format(role, party_id))
return self._roles[role].index(party_id)
@LOGGER.catch(reraise=True)
def add_component(self, component, data=None, model=None, cache=None):
if isinstance(component, PipeLine):
if component.is_deploy() is False:
raise ValueError("To use a training pipeline object as predict component, should deploy model first")
if model:
raise ValueError("pipeline should not have model as input!")
if not data:
raise ValueError("To use pipeline as a component, please set data input")
self._stage = "predict"
self._predict_pipeline.append({"pipeline": component, "data": data.predict_input})
meta = component.get_predict_meta()
self.restore_roles(meta.get("initiator"), meta.get("roles"))
return self
if not isinstance(component, Component):
raise ValueError(
"To add a component to pipeline, component {} should be a Component object".format(component))
if component.name in self._components:
raise Warning("component {} is added before".format(component.name))
self._components[component.name] = component
if data is not None:
if not isinstance(data, Data):
raise ValueError("data input of component {} should be passed by data object".format(component.name))
attrs_dict = vars(data)
self._components_input[component.name] = {"data": {}}
for attr, val in attrs_dict.items():
if not attr.endswith("data"):
continue
if val is None:
continue
data_key = attr.strip("_")
if isinstance(val, list):
self._components_input[component.name]["data"][data_key] = val
else:
self._components_input[component.name]["data"][data_key] = [val]
if model is not None:
if not isinstance(model, Model):
raise ValueError("model input of component {} should be passed by model object".format(component.name))
attrs_dict = vars(model)
for attr, val in attrs_dict.items():
if not attr.endswith("model"):
continue
if val is None:
continue
if isinstance(val, list):
self._components_input[component.name][attr.strip("_")] = val
else:
self._components_input[component.name][attr.strip("_")] = [val]
if cache is not None:
if not isinstance(cache, Cache):
raise ValueError("cache input of component {} should be passed by cache object".format(component.name))
attr = cache.cache
if not isinstance(attr, list):
attr = [attr]
self._components_input[component.name]["cache"] = attr
return self
@LOGGER.catch(reraise=True)
def add_upload_data(self, file, table_name, namespace, head=1, partition=16,
id_delimiter=",", extend_sid=False, auto_increasing_sid=False, **kargs):
data_conf = {"file": file,
"table_name": table_name,
"namespace": namespace,
"head": head,
"partition": partition,
"id_delimiter": id_delimiter,
"extend_sid": extend_sid,
"auto_increasing_sid": auto_increasing_sid, **kargs}
self._upload_conf.append(data_conf)
def _get_task_inst(self, job_id, name, init_role, party_id):
component = None
if name in self._components:
component = self._components[name]
if component is None:
if self._stage != "predict":
raise ValueError(f"Component {name} does not exist")
training_meta = self._predict_pipeline[0]["pipeline"].get_predict_meta()
component = training_meta.get("components").get(name)
if component is None:
raise ValueError(f"Component {name} does not exist")
return TaskInfo(jobid=job_id,
component=component,
job_client=self._job_invoker,
role=init_role,
party_id=party_id)
@LOGGER.catch(reraise=True)
def get_component(self, component_names=None):
job_id = self._train_job_id
if self._cur_state != "fit":
job_id = self._predict_job_id
init_role = self._initiator.role
party_id = self._initiator.party_id
if not component_names:
component_tasks = {}
for name in self._components:
component_tasks[name] = self._get_task_inst(job_id, name, init_role, party_id)
return component_tasks
elif isinstance(component_names, str):
return self._get_task_inst(job_id, component_names, init_role, party_id)
elif isinstance(component_names, list):
component_tasks = []
for name in component_names:
component_tasks.append(self._get_task_inst(job_id, name, init_role, party_id))
return component_tasks
def _construct_train_dsl(self):
if self._global_job_provider:
self._train_dsl["provider"] = self._global_job_provider
self._train_dsl["components"] = {}
for name, component in self._components.items():
component_dsl = {"module": component.module}
if name in self._components_input:
component_dsl["input"] = self._components_input[name]
if hasattr(component, "output"):
component_dsl["output"] = {}
output_attrs = {"data": "data_output",
"model": "model_output",
"cache": "cache_output"}
for output_key, attr in output_attrs.items():
if hasattr(component.output, attr):
component_dsl["output"][output_key] = getattr(component.output, attr)
provider_name = None
provider_version = None
if not hasattr(component, "source_provider"):
LOGGER.warning(f"Can not retrieval source provider of component {name}, "
f"refer to pipeline/component/component_base.py")
else:
provider_name = getattr(component, "source_provider")
if provider_name is None:
LOGGER.warning(f"Source provider of component {name} is None, "
f"refer to pipeline/component/component_base.py")
if hasattr(component, "provider"):
provider = getattr(component, "provider")
if provider is not None:
if provider.find("@") != -1:
provider_name, provider_version = provider.split("@", -1)
else:
provider_name = provider
# component_dsl["provider"] = provider
if getattr(component, "provider_version") is not None:
provider_version = getattr(component, "provider_version")
if provider_name and provider_version:
component_dsl["provider"] = "@".join([provider_name, provider_version])
elif provider_name:
component_dsl["provider"] = provider_name
self._train_dsl["components"][name] = component_dsl
if not self._train_dsl:
raise ValueError("there are no components to train")
LOGGER.debug(f"train_dsl: {self._train_dsl}")
def _construct_train_conf(self):
self._train_conf["dsl_version"] = VERSION
self._train_conf["initiator"] = self._get_initiator_conf()
self._train_conf["role"] = self._roles
self._train_conf["job_parameters"] = {"common": {"job_type": "train"}}
for name, component in self._components.items():
param_conf = component.get_config(version=VERSION, roles=self._roles)
if "common" in param_conf:
common_param_conf = param_conf["common"]
if "component_parameters" not in self._train_conf:
self._train_conf["component_parameters"] = {}
if "common" not in self._train_conf["component_parameters"]:
self._train_conf["component_parameters"]["common"] = {}
self._train_conf["component_parameters"]["common"].update(common_param_conf)
if "role" in param_conf:
role_param_conf = param_conf["role"]
if "component_parameters" not in self._train_conf:
self._train_conf["component_parameters"] = {}
if "role" not in self._train_conf["component_parameters"]:
self._train_conf["component_parameters"]["role"] = {}
self._train_conf["component_parameters"]["role"] = tools.merge_dict(
role_param_conf, self._train_conf["component_parameters"]["role"])
LOGGER.debug(f"self._train_conf: \n {json.dumps(self._train_conf, indent=4, ensure_ascii=False)}")
return self._train_conf
def _construct_upload_conf(self, data_conf):
upload_conf = copy.deepcopy(data_conf)
# upload_conf["work_mode"] = work_mode
return upload_conf
def describe(self):
LOGGER.info(f"Pipeline Stage is {self._stage}")
LOGGER.info("DSL is:")
if self._stage == "fit":
LOGGER.info(f"{self._train_dsl}")
else:
LOGGER.info(f"{self._predict_dsl}")
LOGGER.info(f"Pipeline Create Time: {self._create_time}")
def get_train_job_id(self):
return self._train_job_id
def get_predict_job_id(self):
return self._predict_job_id
def _set_state(self, state):
self._cur_state = state
def set_job_invoker(self, job_invoker):
self._job_invoker = job_invoker
@LOGGER.catch(reraise=True)
def compile(self):
self._construct_train_dsl()
self._train_conf = self._construct_train_conf()
if self._stage == "predict":
predict_pipeline = self._predict_pipeline[0]["pipeline"]
data_info = self._predict_pipeline[0]["data"]
meta = predict_pipeline.get_predict_meta()
if meta["stage"] == "predict":
raise ValueError(
"adding predict pipeline objects'stage is predict, a predict pipeline cannot be an input component")
self._model_info = meta["model_info"]
predict_pipeline_dsl = meta["predict_dsl"]
predict_pipeline_conf = meta["train_conf"]
if not predict_pipeline_dsl:
raise ValueError(
"Cannot find deploy model in predict pipeline, to use a pipeline as input component, "
"it should be deploy first")
for cpn in self._train_dsl["components"]:
if cpn in predict_pipeline_dsl["components"]:
raise ValueError(
f"component name {cpn} exist in predict pipeline's deploy component, this is not support")
if "algorithm_parameters" in predict_pipeline_conf:
algo_param = predict_pipeline_conf["algorithm_parameters"]
if "algorithm_parameters" in self._train_conf:
for key, value in algo_param.items():
if key not in self._train_conf["algorithm_parameters"]:
self._train_conf["algorithm_parameters"][key] = value
else:
self._train_conf["algorithm_parameters"] = algo_param
if "role_parameters" in predict_pipeline_conf:
role_param = predict_pipeline_conf["role_parameters"]
for cpn in self._train_dsl["components"]:
for role, param in role_param.items():
for idx in param:
if param[idx].get(cpn) is not None:
del predict_pipeline_conf["role_parameters"][role][idx][cpn]
if "role_parameters" not in self._train_conf:
self._train_conf["role_parameters"] = {}
self._train_conf["role_parameters"] = tools.merge_dict(self._train_conf["role_parameters"],
predict_pipeline_conf["role_parameters"])
self._predict_dsl = tools.merge_dict(predict_pipeline_dsl, self._train_dsl)
for data_field, val in data_info.items():
cpn = data_field.split(".", -1)[0]
dataset = data_field.split(".", -1)[1]
if not isinstance(val, list):
val = [val]
if "input" not in self._predict_dsl["components"][cpn]:
self._predict_dsl["components"][cpn]["input"] = {}
if 'data' not in self._predict_dsl["components"][cpn]["input"]:
self._predict_dsl["components"][cpn]["input"]["data"] = {}
self._predict_dsl["components"][cpn]["input"]["data"][dataset] = val
return self
@LOGGER.catch(reraise=True)
def _check_duplicate_setting(self, submit_conf):
system_role = self._system_role
if "role" in submit_conf["job_parameters"]:
role_conf = submit_conf["job_parameters"]["role"]
system_role_conf = role_conf.get(system_role, {})
for party, conf in system_role_conf.items():
if conf.get("user"):
raise ValueError(f"system role {system_role}'s user info already set. Please check.")
def _feed_job_parameters(self, conf, job_type=None,
model_info=None, job_parameters=None):
submit_conf = copy.deepcopy(conf)
LOGGER.debug(f"submit conf type is {type(submit_conf)}")
if job_parameters:
submit_conf["job_parameters"] = job_parameters.get_config(roles=self._roles)
if "common" not in submit_conf["job_parameters"]:
submit_conf["job_parameters"]["common"] = {}
submit_conf["job_parameters"]["common"]["job_type"] = job_type
if model_info is not None:
submit_conf["job_parameters"]["common"]["model_id"] = model_info.model_id
submit_conf["job_parameters"]["common"]["model_version"] = model_info.model_version
if self._system_role:
self._check_duplicate_setting(submit_conf)
init_role = self._initiator.role
idx = str(self._roles[init_role].index(self._initiator.party_id))
if "role" not in submit_conf["job_parameters"]:
submit_conf["job_parameters"]["role"] = {}
if init_role not in submit_conf["job_parameters"]["role"]:
submit_conf["job_parameters"]["role"][init_role] = {}
if idx not in submit_conf["job_parameters"]["role"][init_role]:
submit_conf["job_parameters"]["role"][init_role][idx] = {}
submit_conf["job_parameters"]["role"][init_role][idx].update({"user": getpass.getuser()})
return submit_conf
def _filter_out_deploy_component(self, predict_conf):
if "component_parameters" not in predict_conf:
return predict_conf
if "common" in predict_conf["component_parameters"]:
cpns = list(predict_conf["component_parameters"]["common"])
for cpn in cpns:
if cpn not in self._components.keys():
del predict_conf["component_parameters"]["common"]
if "role" in predict_conf["component_parameters"]:
            roles = list(predict_conf["component_parameters"]["role"].keys())
for role in roles:
role_params = predict_conf["component_parameters"]["role"].get(role)
                indexs = list(role_params.keys())
for idx in indexs:
                    cpns = list(role_params[idx].keys())
for cpn in cpns:
if cpn not in self._components.keys():
del role_params[idx][cpn]
if not role_params[idx]:
del role_params[idx]
if role_params:
predict_conf["component_parameters"]["role"][role] = role_params
else:
del predict_conf["component_parameters"]["role"][role]
return predict_conf
@LOGGER.catch(reraise=True)
def fit(self, job_parameters=None, callback_func=None):
if self._stage == "predict":
raise ValueError("This pipeline is constructed for predicting, cannot use fit interface")
if job_parameters and not isinstance(job_parameters, JobParameters):
raise ValueError("input parameter of fit function should be JobParameters object")
LOGGER.debug(f"in fit, _train_conf is: \n {json.dumps(self._train_conf)}")
self._set_state("fit")
training_conf = self._feed_job_parameters(self._train_conf, job_type="train", job_parameters=job_parameters)
self._train_conf = training_conf
LOGGER.debug(f"train_conf is: \n {json.dumps(training_conf, indent=4, ensure_ascii=False)}")
self._train_job_id, detail_info = self._job_invoker.submit_job(self._train_dsl, training_conf, callback_func)
self._train_board_url = detail_info["board_url"]
self._model_info = SimpleNamespace(model_id=detail_info["model_info"]["model_id"],
model_version=detail_info["model_info"]["model_version"])
self._fit_status = self._job_invoker.monitor_job_status(self._train_job_id,
self._initiator.role,
self._initiator.party_id)
@LOGGER.catch(reraise=True)
def update_model_info(self, model_id=None, model_version=None):
# predict pipeline
if self._predict_pipeline:
predict_pipeline = self._predict_pipeline[0]["pipeline"]
if model_id:
predict_pipeline._model_info.model_id = model_id
if model_version:
predict_pipeline._model_info.model_version = model_version
return self
# train pipeline
original_model_id, original_model_version = None, None
if self._model_info is not None:
original_model_id, original_model_version = self._model_info.model_id, self._model_info.model_version
new_model_id = model_id if model_id is not None else original_model_id
new_model_version = model_version if model_version is not None else original_model_version
if new_model_id is None and new_model_version is None:
return self
self._model_info = SimpleNamespace(model_id=new_model_id, model_version=new_model_version)
return self
@LOGGER.catch(reraise=True)
def continuously_fit(self):
self._fit_status = self._job_invoker.monitor_job_status(self._train_job_id,
self._initiator.role,
self._initiator.party_id,
previous_status=self._fit_status)
@LOGGER.catch(reraise=True)
def predict(self, job_parameters=None, components_checkpoint=None):
"""
Parameters
----------
job_parameters: None
components_checkpoint: specify which model to take, ex.: {"hetero_lr_0": {"step_index": 8}}
Returns
-------
"""
if self._stage != "predict":
raise ValueError(
"To use predict function, please deploy component(s) from training pipeline"
"and construct a new predict pipeline with data reader and training pipeline.")
if job_parameters and not isinstance(job_parameters, JobParameters):
raise ValueError("input parameter of fit function should be JobParameters object")
self.compile()
res_dict = self._job_invoker.model_deploy(model_id=self._model_info.model_id,
model_version=self._model_info.model_version,
predict_dsl=self._predict_dsl,
components_checkpoint=components_checkpoint)
self._predict_model_info = SimpleNamespace(model_id=res_dict["model_id"],
model_version=res_dict["model_version"])
predict_conf = self._feed_job_parameters(self._train_conf,
job_type="predict",
model_info=self._predict_model_info,
job_parameters=job_parameters)
predict_conf = self._filter_out_deploy_component(predict_conf)
self._predict_conf = copy.deepcopy(predict_conf)
predict_dsl = copy.deepcopy(self._predict_dsl)
self._predict_job_id, _ = self._job_invoker.submit_job(dsl=predict_dsl, submit_conf=predict_conf)
self._job_invoker.monitor_job_status(self._predict_job_id,
self._initiator.role,
self._initiator.party_id)
@LOGGER.catch(reraise=True)
def upload(self, drop=0):
for data_conf in self._upload_conf:
upload_conf = self._construct_upload_conf(data_conf)
LOGGER.debug(f"upload_conf is {json.dumps(upload_conf)}")
self._train_job_id, detail_info = self._job_invoker.upload_data(upload_conf, int(drop))
self._train_board_url = detail_info["board_url"]
self._job_invoker.monitor_job_status(self._train_job_id,
"local",
0)
@LOGGER.catch(reraise=True)
def dump(self, file_path=None):
pkl = pickle.dumps(self)
if file_path is not None:
with open(file_path, "wb") as fout:
fout.write(pkl)
return pkl
@classmethod
def load(cls, pipeline_bytes):
"""
return pickle.loads(pipeline_bytes)
"""
pipeline_obj = pickle.loads(pipeline_bytes)
pipeline_obj.set_job_invoker(JobInvoker())
return pipeline_obj
@classmethod
def load_model_from_file(cls, file_path):
with open(file_path, "rb") as fin:
pipeline_obj = pickle.loads(fin.read())
pipeline_obj.set_job_invoker(JobInvoker())
return pipeline_obj
@LOGGER.catch(reraise=True)
def deploy_component(self, components=None):
if self._train_dsl is None:
raise ValueError("Before deploy model, training should be finished!!!")
if components is None:
components = self._components
deploy_cpns = []
for cpn in components:
if isinstance(cpn, str):
deploy_cpns.append(cpn)
elif isinstance(cpn, Component):
deploy_cpns.append(cpn.name)
else:
raise ValueError(
"deploy component parameters is wrong, expect str or Component object, but {} find".format(
type(cpn)))
if deploy_cpns[-1] not in self._components:
raise ValueError("Component {} does not exist in pipeline".format(deploy_cpns[-1]))
if isinstance(self._components.get(deploy_cpns[-1]), Reader):
raise ValueError("Reader should not be include in predict pipeline")
res_dict = self._job_invoker.model_deploy(model_id=self._model_info.model_id,
model_version=self._model_info.model_version,
cpn_list=deploy_cpns)
self._predict_model_info = SimpleNamespace(model_id=res_dict["model_id"],
model_version=res_dict["model_version"])
self._predict_dsl = self._job_invoker.get_predict_dsl(model_id=res_dict["model_id"],
model_version=res_dict["model_version"])
if self._predict_dsl:
self._deploy = True
return self
def is_deploy(self):
return self._deploy
def is_load(self):
return self._load
@LOGGER.catch(reraise=True)
def init_predict_config(self, config):
if isinstance(config, PipeLine):
config = config.get_predict_meta()
self._stage = "predict"
self._model_info = config["model_info"]
self._predict_dsl = config["predict_dsl"]
self._train_conf = config["train_conf"]
self._initiator = config["initiator"]
self._train_components = config["train_components"]
@LOGGER.catch(reraise=True)
def get_component_input_msg(self):
if VERSION != 2:
raise ValueError("In DSL Version 1,only need to config data from args, do not need special component")
need_input = {}
for cpn_name, config in self._predict_dsl["components"].items():
if "input" not in config:
continue
if "data" not in config["input"]:
continue
data_config = config["input"]["data"]
for data_type, dataset_list in data_config.items():
for data_set in dataset_list:
input_cpn = data_set.split(".", -1)[0]
input_inst = self._components[input_cpn]
if isinstance(input_inst, Reader):
if cpn_name not in need_input:
need_input[cpn_name] = {}
need_input[cpn_name][data_type] = []
need_input[cpn_name][data_type].append(input_cpn)
return need_input
@LOGGER.catch(reraise=True)
def get_input_reader_placeholder(self):
input_info = self.get_component_input_msg()
input_placeholder = set()
for cpn_name, data_dict in input_info.items():
for data_type, dataset_list in data_dict.items():
for dataset in dataset_list:
input_placeholder.add(dataset)
return input_placeholder
@LOGGER.catch(reraise=True)
def set_inputs(self, data_dict):
if not isinstance(data_dict, dict):
raise ValueError(
"inputs for predicting should be a dict, key is input_placeholder name, value is a reader object")
unfilled_placeholder = self.get_input_reader_placeholder() - set(data_dict.keys())
if unfilled_placeholder:
raise ValueError("input placeholder {} should be fill".format(unfilled_placeholder))
self._data_to_feed_in_prediction = data_dict
@LOGGER.catch(reraise=True)
def bind_table(self, name, namespace, path, engine='PATH', replace=True, **kwargs):
info = self._job_invoker.bind_table(engine=engine, name=name, namespace=namespace, address={
"path": path
}, drop=replace, **kwargs)
return info
# @LOGGER.catch(reraise=True)
def __getattr__(self, attr):
if attr in self._components:
return self._components[attr]
return self.__getattribute__(attr)
@LOGGER.catch(reraise=True)
def __getitem__(self, item):
if item not in self._components:
raise ValueError("Pipeline does not has component }{}".format(item))
return self._components[item]
def __getstate__(self):
return vars(self)
def __setstate__(self, state):
vars(self).update(state)
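# Hedged usage sketch (illustration only, not part of the original module):
# a minimal fit -> deploy -> predict flow built from the methods defined above.
# Component variables such as `reader_0` and `data_transform_0` are placeholders
# assumed to be configured elsewhere (see pipeline-mini-demo.py further below
# for a complete example).
#
#   pipeline = PipeLine().set_initiator(role="guest", party_id=9999)
#   pipeline.set_roles(guest=9999, host=10000, arbiter=10000)
#   pipeline.add_component(reader_0)
#   pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
#   pipeline.compile()
#   pipeline.fit()
#   pipeline.deploy_component([data_transform_0])
#   predict_pipeline = PipeLine()
#   predict_pipeline.add_component(reader_0)
#   predict_pipeline.add_component(pipeline, data=Data(
#       predict_input={pipeline.data_transform_0.input.data: reader_0.output.data}))
#   predict_pipeline.predict()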
| 34,546 | 40.72343 | 120 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/test/test_homo_lr.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.component.homo_lr import HomoLR
a = HomoLR(name="homo_lr_0")
print(a.output.data)
print(a.output.model)
| 739 | 28.6 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/test/test_upload.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import uuid
from fate_arch.session import computing_session as session
from pipeline.backend.config import Backend
from pipeline.backend.config import WorkMode
from pipeline.backend.pipeline import PipeLine
class TestUpload(unittest.TestCase):
def setUp(self):
self.job_id = str(uuid.uuid1())
session.init(self.job_id)
self.file = "examples/data/breast_homo_guest.csv"
self.table_name = "breast_homo_guest"
self.data_count = 227
def test_upload(self):
upload_pipeline = PipeLine()
upload_pipeline.add_upload_data(file=self.file,
table_name=self.table_name, namespace=self.job_id)
upload_pipeline.upload()
upload_count = session.get_data_table(self.table_name, self.job_id).count()
return upload_count == self.data_count
def tearDown(self):
session.stop()
try:
session.cleanup("*", self.job_id, True)
except EnvironmentError:
pass
try:
session.cleanup("*", self.job_id, False)
except EnvironmentError:
pass
if __name__ == '__main__':
unittest.main()
| 1,860 | 30.542373 | 90 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/test/test_hetero_secureboost.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.component.hetero_secureboost import HeteroSecureBoost
a = HeteroSecureBoost(name="hetero_secureboost_0")
print(a.output.data)
print(a.output.model)
| 783 | 30.36 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/test/test_hetero_lr.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.component.hetero_lr import HeteroLR
a = HeteroLR(name="hetero_lr_0", early_stop="weight_diff",
cv_param={"n_splits": 3, "shuffle": False, "need_cv": True})
print(a.output.data)
print(a.output.model)
| 847 | 31.615385 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/test/test_evaluation.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.component.evaluation import Evaluation
a = Evaluation(name="evaluation_0")
print(a.output.data)
| 731 | 29.5 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/test/test_component.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.component.component_base import Component
a = Component(name="test")
b = a.get_party_instance(role='guest', party_id=1)
bb = a.get_party_instance(role='guest', party_id=[1, 2, 3, 4])
c = Component()
print(a.name)
print(b.name)
print(bb.name)
print(c.name)
| 890 | 29.724138 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/test/test_scale.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.component.scale import FeatureScale
a = FeatureScale(name="scale_0")
print(a.output.data)
| 725 | 29.25 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/test/test_intersection.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.component.intersection import Intersection
a = Intersection(name="intersection_0")
print(a.output.data)
| 739 | 29.833333 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/test/test_one_hot_encoder.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.component.one_hot_encoder import OneHotEncoder
a = OneHotEncoder(name="one_hot_encoder_0")
print(a.output.data)
| 747 | 30.166667 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/test/test_hetero_poisson.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.component.hetero_poisson import HeteroPoisson
a = HeteroPoisson(name="hetero_poisson_0", early_stop="diff",
early_stopping_rounds=3, validation_freqs=3)
print(a.output.data)
print(a.output.model)
| 849 | 31.692308 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/test/test_local_baseline.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.component.local_baseline import LocalBaseline
a = LocalBaseline(name="local_baseline_0", need_run=False)
print(a.output.data)
print(a.output.model)
| 783 | 30.36 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/test/test_sampler.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.component.sampler import FederatedSample
a = FederatedSample(name="federated_sample_0", method="upsample")
print(a.output.data)
print(a.output.model)
| 785 | 30.44 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/test/test_hetero_pearson.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.component.hetero_pearson import HeteroPearson
a = HeteroPearson(name="hetero_pearson_0", column_indexes=[4, 2])
print(a.output.data)
print(a.output.model)
| 790 | 30.64 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/test/test_hetero_linr.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.component.hetero_linr import HeteroLinR
a = HeteroLinR(name="hetero_linr_0", early_stop="weight_diff",
stepwise_param={"max_step": 3, "need_stepwise": True})
print(a.output.data)
print(a.output.model)
| 851 | 31.769231 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/test/test_homo_nn.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.component.homo_nn import HomoNN
a = HomoNN(name="homo_nn_0")
print(a.output.data)
print(a.output.model)
| 739 | 28.6 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/test/test_union.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.component.union import Union
a = Union(name="union_0")
print(a.output.data)
| 711 | 28.666667 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/test/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/test/test_homo_secureboost.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.component.homo_secureboost import HomoSecureBoost
a = HomoSecureBoost(name="homo_secureboost_0")
print(a.output.data)
print(a.output.model)
| 775 | 30.04 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/test/test_hetero_nn.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.component.hetero_nn import HeteroNN
a = HeteroNN(name="hetero_nn_0", epochs=3)
print(a.output.data)
print(a.output.model)
| 757 | 29.32 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/interface/model.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class Model(object):
def __init__(self, model=None, isometric_model=None):
self._model = model
self._isometric_model = isometric_model
def __getattr__(self, model_key):
if model_key == "model":
            return self._model
elif model_key == "isometric_model":
return self._isometric_model
else:
raise ValueError("model key {} not support".format(model_key))
| 1,050 | 34.033333 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/interface/data.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.backend.config import VERSION
class Data(object):
def __init__(self, data=None, train_data=None, validate_data=None, test_data=None, predict_input=None):
self._data = data
self._train_data = train_data
self._validate_data = validate_data
self._test_data = test_data
self._predict_input = predict_input
def __getattr__(self, data_key):
if data_key == "train_data":
return self._train_data
elif data_key == "validate_data":
return self._validate_data
elif data_key == "test_data":
return self._test_data
elif data_key == "data":
return self._data
elif data_key == "predict_input":
return self._predict_input
else:
raise ValueError("data key {} not support".format(data_key))
| 1,479 | 31.888889 | 107 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/interface/cache.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class Cache(object):
def __init__(self, cache=None):
self._cache = cache
def __getattr__(self, cache_key):
if cache_key == "cache":
return self._cache
else:
raise ValueError("cache key {} not support".format(cache_key))
| 895 | 32.185185 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/interface/__init__.py
|
from pipeline.interface.data import Data
from pipeline.interface.input import Input
from pipeline.interface.output import Output
from pipeline.interface.model import Model
from pipeline.interface.cache import Cache
__all__ = ["Data", "Input", "Output", "Model", "Cache"]
| 273 | 29.444444 | 55 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/interface/output.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.backend.config import IODataType
class Output(object):
def __init__(self, name, data_type='single', has_data=True, has_model=True, has_cache=False, output_unit=1):
if has_model:
self.model = Model(name).model
self.model_output = Model(name).get_all_output()
if has_data:
if data_type == "single":
self.data = SingleOutputData(name).data
self.data_output = SingleOutputData(name).get_all_output()
elif data_type == "multi":
self.data = TraditionalMultiOutputData(name)
self.data_output = TraditionalMultiOutputData(name).get_all_output()
else:
self.data = NoLimitOutputData(name, output_unit)
self.data_output = NoLimitOutputData(name, output_unit).get_all_output()
if has_cache:
self.cache = Cache(name).cache
self.cache_output = Cache(name).get_all_output()
class Model(object):
def __init__(self, prefix):
self.prefix = prefix
@property
def model(self):
return ".".join([self.prefix, "model"])
@staticmethod
def get_all_output():
return ["model"]
class SingleOutputData(object):
def __init__(self, prefix):
self.prefix = prefix
@property
def data(self):
return ".".join([self.prefix, IODataType.SINGLE])
@staticmethod
def get_all_output():
return ["data"]
class TraditionalMultiOutputData(object):
def __init__(self, prefix):
self.prefix = prefix
@property
def train_data(self):
return ".".join([self.prefix, IODataType.TRAIN])
@property
def test_data(self):
return ".".join([self.prefix, IODataType.TEST])
@property
def validate_data(self):
return ".".join([self.prefix, IODataType.VALIDATE])
@staticmethod
def get_all_output():
return [IODataType.TRAIN,
IODataType.VALIDATE,
IODataType.TEST]
class NoLimitOutputData(object):
def __init__(self, prefix, output_unit=1):
self.prefix = prefix
self.output_unit = output_unit
@property
def data(self):
return [self.prefix + "." + "data_" + str(i) for i in range(self.output_unit)]
def get_all_output(self):
return ["data_" + str(i) for i in range(self.output_unit)]
class Cache(object):
def __init__(self, prefix):
self.prefix = prefix
@property
def cache(self):
return ".".join([self.prefix, "cache"])
@staticmethod
def get_all_output():
return ["cache"]
| 3,257 | 27.330435 | 112 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/interface/input.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.backend.config import IODataType
class Input(object):
def __init__(self, name, data_type="single"):
if data_type == "single":
self.data = InputData(name).data
self.data_output = InputData(name).get_all_input()
elif data_type == "multi":
self.data = TrainingInputData(name)
self.data_output = InputData(name).get_all_input()
else:
raise ValueError("input data type should be one of ['single', 'multi']")
class InputData(object):
def __init__(self, prefix):
self.prefix = prefix
@property
def data(self):
return ".".join([self.prefix, IODataType.SINGLE])
@staticmethod
def get_all_input():
return ["data"]
class TrainingInputData(object):
def __init__(self, prefix):
self.prefix = prefix
@property
def train_data(self):
return ".".join([self.prefix, IODataType.TRAIN])
@property
def test_data(self):
return ".".join([self.prefix, IODataType.TEST])
@property
def validate_data(self):
return ".".join([self.prefix, IODataType.VALIDATE])
@staticmethod
def get_all_input():
return [IODataType.TRAIN,
IODataType.VALIDATE,
IODataType.TEST]
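# Hedged sketch (illustration, not part of the original file): Input with
# data_type="multi" exposes train/validate/test references built from the
# prefix; "hetero_lr_0" is a placeholder component name and the expected values
# assume IODataType.TRAIN == "train_data" etc.
if __name__ == "__main__":
    inp = Input("hetero_lr_0", data_type="multi")
    print(inp.data.train_data)     # expected "hetero_lr_0.train_data"
    print(inp.data.validate_data)  # expected "hetero_lr_0.validate_data"
    print(inp.data_output)         # -> ["data"]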
| 1,914 | 28.461538 | 84 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/demo/pipeline-mini-demo.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
def main():
# parties config
guest = 9999
host = 10000
arbiter = 10000
# specify input data name & namespace in database
guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_train_data = {"name": "breast_hetero_host", "namespace": "experiment"}
guest_eval_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_eval_data = {"name": "breast_hetero_host", "namespace": "experiment"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role="guest", party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
reader_1 = Reader(name="reader_1")
reader_1.get_party_instance(role="guest", party_id=guest).component_param(table=guest_eval_data)
reader_1.get_party_instance(role="host", party_id=host).component_param(table=host_eval_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_1 = DataTransform(name="data_transform_1")
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role="guest", party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense")
# get and configure DataTransform party instance of host
data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
# define Intersection components
intersection_0 = Intersection(name="intersection_0")
intersection_1 = Intersection(name="intersection_1")
# define HeteroLR component
hetero_lr_0 = HeteroLR(name="hetero_lr_0", early_stop="weight_diff", learning_rate=0.15, optimizer="rmsprop",
max_iter=10, early_stopping_rounds=2, validation_freqs=1)
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
# set data_transform_1 to replicate model from data_transform_0
pipeline.add_component(data_transform_1,
data=Data(data=reader_1.output.data), model=Model(data_transform_0.output.model))
# set data input sources of intersection components
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersection_1, data=Data(data=data_transform_1.output.data))
# set train & validate data of hetero_lr_0 component
pipeline.add_component(
hetero_lr_0,
data=Data(
train_data=intersection_0.output.data,
validate_data=intersection_1.output.data))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
import json
print(json.dumps(pipeline.get_component("hetero_lr_0").get_summary(), indent=4))
# predict
# deploy required components
pipeline.deploy_component([data_transform_0, intersection_0, hetero_lr_0])
# initiate predict pipeline
predict_pipeline = PipeLine()
reader_2 = Reader(name="reader_2")
reader_2.get_party_instance(role="guest", party_id=guest).component_param(table=guest_eval_data)
reader_2.get_party_instance(role="host", party_id=host).component_param(table=host_eval_data)
# add data reader onto predict pipeline
predict_pipeline.add_component(reader_2)
# add selected components from train pipeline onto predict pipeline
# specify data source
predict_pipeline.add_component(pipeline, data=Data(
predict_input={pipeline.data_transform_0.input.data: reader_2.output.data}))
# run predict model
predict_pipeline.predict()
if __name__ == "__main__":
main()
| 5,265 | 41.128 | 113 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/demo/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/fate_client/pipeline/demo/pipeline-upload.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from pipeline.backend.pipeline import PipeLine
# path to data
# default fate installation path
DATA_BASE = "/data/projects/fate"
def main():
# parties config
guest = 9999
# partition for data storage
partition = 4
dense_data = {"name": "breast_hetero_guest", "namespace": f"experiment"}
tag_data = {"name": "tag_value_1", "namespace": f"experiment"}
pipeline_upload = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest)
# add upload data info
# original csv file path
pipeline_upload.add_upload_data(file=os.path.join(DATA_BASE, "examples/data/breast_hetero_guest.csv"),
table_name=dense_data["name"], # table name
namespace=dense_data["namespace"], # namespace
head=1, partition=partition) # data info
pipeline_upload.add_upload_data(file=os.path.join(DATA_BASE, "examples/data/tag_value_1000_140.csv"),
table_name=tag_data["name"],
namespace=tag_data["namespace"],
head=0, partition=partition)
# upload all data
pipeline_upload.upload(drop=1)
if __name__ == "__main__":
main()
| 1,950 | 33.839286 | 106 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/demo/pipeline-upload-spark.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from pipeline.backend.pipeline import PipeLine
# path to data
# default fate installation path
DATA_BASE = "/data/projects/fate"
def main():
# parties config
guest = 9999
# partition for data storage
partition = 4
dense_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
tag_data = {"name": "tag_value_1", "namespace": "experiment"}
pipeline_upload = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest)
# add upload data info
# csv file name from python path & file name
pipeline_upload.add_upload_data(file=os.path.join(DATA_BASE, "examples/data/breast_hetero_guest.csv"),
table_name=dense_data["name"], # table name
namespace=dense_data["namespace"], # namespace
head=1, partition=partition, # data info
id_delimiter=",") # id delimiter, needed for spark
pipeline_upload.add_upload_data(file=os.path.join(DATA_BASE, "examples/data/tag_value_1000_140.csv"),
table_name=tag_data["name"],
namespace=tag_data["namespace"],
head=0, partition=partition,
id_delimiter=",")
# upload all data
pipeline_upload.upload(drop=1)
import json
print(json.dumps(pipeline_upload._upload_conf(), indent=4))
if __name__ == "__main__":
main()
| 2,213 | 36.525424 | 111 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/utils/logger.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from pathlib import Path
import loguru
from pipeline.backend.config import LogPath, LogFormat, PipelineConfig
RUNTIME_LOG = "runtime"
info_log_path = os.path.join(LogPath.log_directory(), LogPath.INFO)
debug_log_path = os.path.join(LogPath.log_directory(), LogPath.DEBUG)
error_log_path = os.path.join(LogPath.log_directory(), LogPath.ERROR)
def runtime_log_only(record):
log_type = record["extra"].get("log_type", "")
return log_type == RUNTIME_LOG
LOGGER = loguru.logger
LOGGER.remove()
LOGGER.configure(extra={"format": LogFormat.NORMAL})
if PipelineConfig.CONSOLE_DISPLAY_LOG:
console_handler = LOGGER.add(sys.stderr, level="INFO", colorize=True,
filter=runtime_log_only)
LOGGER.add(Path(info_log_path).resolve(), level="INFO", rotation="500MB",
colorize=True, filter=runtime_log_only)
LOGGER.add(Path(debug_log_path).resolve(), level="DEBUG", rotation="500MB", colorize=True,
filter=runtime_log_only)
LOGGER.add(Path(error_log_path).resolve(), level="ERROR", rotation="500MB", colorize=True,
backtrace=True, filter=runtime_log_only)
LOGGER = LOGGER.bind(log_type=RUNTIME_LOG)
def disable_console_log():
"""
disable logging to stderr, for silent mode
Returns
-------
"""
try:
LOGGER.remove(console_handler)
except BaseException:
pass
def enable_console_log():
disable_console_log()
global console_handler
console_handler = LOGGER.add(sys.stderr, level="INFO", colorize=True,
filter=runtime_log_only)
| 2,223 | 30.323944 | 90 |
py
|