Code | Summary
---|---|
Please provide a description of the function:def set_flow_node_ref_list(self, value):
    if value is None or not isinstance(value, list):
        raise TypeError("FlowNodeRefList new value must be a list")
    else:
        for element in value:
            if not isinstance(element, str):
                raise TypeError("FlowNodeRefList elements in variable must be of String class")
        self.__flow_node_ref_list = value | [
"\n Setter for 'flow_node_ref' field.\n :param value - a new value of 'flow_node_ref' field. Must be a list of String objects (ID of referenced nodes).\n "
] |
Please provide a description of the function:def set_child_lane_set(self, value):
    if value is None:
        self.__child_lane_set = value
    elif not isinstance(value, lane_set.LaneSet):
        raise TypeError("ChildLaneSet must be a LaneSet")
    else:
        self.__child_lane_set = value | [
"\n Setter for 'child_lane_set' field.\n :param value - a new value of 'child_lane_set' field. Must be an object of LaneSet type.\n "
] |
Please provide a description of the function:def export_xml_file(self, directory, filename):
    bpmn_export.BpmnDiagramGraphExport.export_xml_file(directory, filename, self) | [
"\n Exports diagram inner graph to BPMN 2.0 XML file (with Diagram Interchange data).\n\n :param directory: strings representing output directory,\n :param filename: string representing output file name.\n "
] |
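A minimal usage sketch of the XML exporters (paths are illustrative; `bpmn_graph` stands for a BpmnDiagramGraph instance populated with the builder methods shown further down; the no-DI variant is in the next entry):
# Hypothetical usage; assumes 'bpmn_graph' is a populated BpmnDiagramGraph.
bpmn_graph.export_xml_file("./output/", "diagram.bpmn")  # with Diagram Interchange data
bpmn_graph.export_xml_file_no_di("./output/", "diagram-no-di.bpmn")  # without DI data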
Please provide a description of the function:def export_xml_file_no_di(self, directory, filename):
    bpmn_export.BpmnDiagramGraphExport.export_xml_file_no_di(directory, filename, self) | [
"\n Exports diagram inner graph to BPMN 2.0 XML file (without Diagram Interchange data).\n\n :param directory: strings representing output directory,\n :param filename: string representing output file name.\n "
] |
Please provide a description of the function:def export_csv_file(self, directory, filename):
    bpmn_csv_export.BpmnDiagramGraphCsvExport.export_process_to_csv(self, directory, filename) | [
"\n Exports diagram inner graph to a CSV file.\n\n :param directory: strings representing output directory,\n :param filename: string representing output file name.\n "
] |
Please provide a description of the function:def get_nodes(self, node_type=""):
    tmp_nodes = self.diagram_graph.nodes(True)
    if node_type == "":
        return tmp_nodes
    else:
        nodes = []
        for node in tmp_nodes:
            if node[1][consts.Consts.type] == node_type:
                nodes.append(node)
        return nodes | [
"\n Gets all nodes of requested type. If no type is provided by user, all nodes in BPMN diagram graph are returned.\n Returns a list of tuples, where first value is an ID of node, second - a dictionary of all node attributes.\n\n :param node_type: string with valid BPMN XML tag name (e.g. 'task', 'sequenceFlow').\n "
] |
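A short sketch of filtering by node type (assumes a populated `bpmn_graph` instance and that the node-name attribute key is the string 'node_name', matching `consts.Consts.node_name`):
# Iterate over all task nodes; each item is an (ID, attribute dict) tuple.
for node_id, attrs in bpmn_graph.get_nodes("task"):
    print(node_id, attrs["node_name"])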
Please provide a description of the function:def get_nodes_list_by_process_id(self, process_id):
    tmp_nodes = self.diagram_graph.nodes(True)
    nodes = []
    for node in tmp_nodes:
        if node[1][consts.Consts.process] == process_id:
            nodes.append(node)
    return nodes | [
"\n Gets all nodes that belong to the process with a given ID.\n Returns a list of tuples, where first value is an ID of node, second - a dictionary of all node attributes.\n\n :param process_id: string object, representing an ID of parent process element.\n "
] |
Please provide a description of the function:def get_node_by_id(self, node_id):
    tmp_nodes = self.diagram_graph.nodes(data=True)
    for node in tmp_nodes:
        if node[0] == node_id:
            return node | [
"\n Gets a node with requested ID.\n Returns a tuple, where first value is node ID, second - a dictionary of all node attributes.\n\n :param node_id: string with ID of node.\n "
] |
Please provide a description of the function:def get_nodes_id_list_by_type(self, node_type):
    tmp_nodes = self.diagram_graph.nodes(data=True)
    id_list = []
    for node in tmp_nodes:
        if node[1][consts.Consts.type] == node_type:
            id_list.append(node[0])
    return id_list | [
"\n Gets a list of node IDs of the requested type.\n Returns a list of IDs.\n\n :param node_type: string with valid BPMN XML tag name (e.g. 'task', 'sequenceFlow').\n "
] |
Please provide a description of the function:def get_flow_by_id(self, flow_id):
    tmp_flows = self.diagram_graph.edges(data=True)
    for flow in tmp_flows:
        if flow[2][consts.Consts.id] == flow_id:
            return flow | [
"\n Gets an edge (flow) with requested ID.\n Returns a tuple, where first value is a source node ID, second - a target node ID, third - a dictionary of all flow attributes.\n\n :param flow_id: string with edge ID.\n "
] |
Please provide a description of the function:def get_flows_list_by_process_id(self, process_id):
    tmp_flows = self.diagram_graph.edges(data=True)
    flows = []
    for flow in tmp_flows:
        if consts.Consts.process in flow[2] and flow[2][consts.Consts.process] == process_id:
            flows.append(flow)
    return flows | [
"\n Gets all edges (flows) that belong to the process with a given ID.\n Returns a list of tuples, where first value is a source node ID, second - a target node ID, third - a dictionary of all flow attributes.\n\n :param process_id: string object, representing an ID of parent process element.\n "
] |
Please provide a description of the function:def create_new_diagram_graph(self, diagram_name=""):
    self.__init__()
    diagram_id = BpmnDiagramGraph.id_prefix + str(uuid.uuid4())
    self.diagram_attributes[consts.Consts.id] = diagram_id
    self.diagram_attributes[consts.Consts.name] = diagram_name | [
"\n Initializes a new BPMN diagram and sets up basic diagram attributes.\n Accepts user-defined values for the following attributes:\n (Diagram element)\n\n - name - default value empty string.\n\n :param diagram_name: string type. Represents a user-defined value of 'BPMNDiagram' element\n attribute 'name'. Default value - empty string.\n "
] |
Please provide a description of the function:def add_process_to_diagram(self, process_name="", process_is_closed=False, process_is_executable=False,
                               process_type="None"):
    plane_id = BpmnDiagramGraph.id_prefix + str(uuid.uuid4())
    process_id = BpmnDiagramGraph.id_prefix + str(uuid.uuid4())
    self.process_elements[process_id] = {consts.Consts.name: process_name,
                                         consts.Consts.is_closed: "true" if process_is_closed else "false",
                                         consts.Consts.is_executable: "true" if process_is_executable else "false",
                                         consts.Consts.process_type: process_type}
    self.plane_attributes[consts.Consts.id] = plane_id
    self.plane_attributes[consts.Consts.bpmn_element] = process_id
    return process_id | [
"\n Adds a new process to diagram and corresponding participant\n process, diagram and plane.\n\n Accepts user-defined values for the following attributes:\n (Process element)\n - isClosed - default value false,\n - isExecutable - default value false,\n - processType - default value None.\n\n :param process_name: string object, process name. Default value - empty string,\n :param process_is_closed: boolean type. Represents a user-defined value of 'process' element\n attribute 'isClosed'. Default value false,\n :param process_is_executable: boolean type. Represents a user-defined value of 'process' element\n attribute 'isExecutable'. Default value false,\n :param process_type: string type. Represents a user-defined value of 'process' element\n attribute 'processType'. Default value \"None\",\n "
] |
Please provide a description of the function:def add_flow_node_to_diagram(self, process_id, node_type, name, node_id=None):
    if node_id is None:
        node_id = BpmnDiagramGraph.id_prefix + str(uuid.uuid4())
    self.diagram_graph.add_node(node_id)
    self.diagram_graph.node[node_id][consts.Consts.id] = node_id
    self.diagram_graph.node[node_id][consts.Consts.type] = node_type
    self.diagram_graph.node[node_id][consts.Consts.node_name] = name
    self.diagram_graph.node[node_id][consts.Consts.incoming_flow] = []
    self.diagram_graph.node[node_id][consts.Consts.outgoing_flow] = []
    self.diagram_graph.node[node_id][consts.Consts.process] = process_id
    # Adding some dummy constant values
    self.diagram_graph.node[node_id][consts.Consts.width] = "100"
    self.diagram_graph.node[node_id][consts.Consts.height] = "100"
    self.diagram_graph.node[node_id][consts.Consts.x] = "100"
    self.diagram_graph.node[node_id][consts.Consts.y] = "100"
    return node_id, self.diagram_graph.node[node_id] | [
"\n Helper function that adds a new Flow Node to diagram. It is used to add a new node of specified type.\n Adds basic information inherited from the Flow Node type.\n\n :param process_id: string object. ID of parent process,\n :param node_type: string object. Represents type of BPMN node passed to method,\n :param name: string object. Name of the node,\n :param node_id: string object. ID of node. Default value - None.\n "
] |
Please provide a description of the function:def add_task_to_diagram(self, process_id, task_name="", node_id=None):
    return self.add_flow_node_to_diagram(process_id, consts.Consts.task, task_name, node_id) | [
"\n Adds a Task element to BPMN diagram.\n User-defined attributes:\n\n - name\n\n\n :param process_id: string object. ID of parent process,\n :param task_name: string object. Name of task,\n :param node_id: string object. ID of node. Default value - None.\n :return: a tuple, where first value is task ID, second a reference to created object.\n "
] |
Please provide a description of the function:def add_subprocess_to_diagram(self, process_id, subprocess_name, is_expanded=False, triggered_by_event=False,
                                  node_id=None):
    subprocess_id, subprocess = self.add_flow_node_to_diagram(process_id, consts.Consts.subprocess,
                                                              subprocess_name, node_id)
    self.diagram_graph.node[subprocess_id][consts.Consts.is_expanded] = "true" if is_expanded else "false"
    self.diagram_graph.node[subprocess_id][consts.Consts.triggered_by_event] = \
        "true" if triggered_by_event else "false"
    return subprocess_id, subprocess | [
"\n Adds a SubProcess element to BPMN diagram.\n User-defined attributes:\n\n - name\n - triggered_by_event\n\n\n :param process_id: string object. ID of parent process,\n :param subprocess_name: string object. Name of subprocess,\n :param is_expanded: boolean value for attribute \"isExpanded\". Default value false,\n :param triggered_by_event: boolean value for attribute \"triggeredByEvent\". Default value false,\n :param node_id: string object. ID of node. Default value - None.\n :return: a tuple, where first value is subProcess ID, second a reference to created object.\n "
] |
Please provide a description of the function:def add_start_event_to_diagram(self, process_id, start_event_name="", start_event_definition=None,
                                   parallel_multiple=False, is_interrupting=True, node_id=None):
    start_event_id, start_event = self.add_flow_node_to_diagram(process_id, consts.Consts.start_event,
                                                                start_event_name, node_id)
    self.diagram_graph.node[start_event_id][consts.Consts.parallel_multiple] = \
        "true" if parallel_multiple else "false"
    self.diagram_graph.node[start_event_id][consts.Consts.is_interrupting] = "true" if is_interrupting else "false"
    start_event_definitions = {"message": "messageEventDefinition", "timer": "timerEventDefinition",
                               "conditional": "conditionalEventDefinition", "signal": "signalEventDefinition",
                               "escalation": "escalationEventDefinition"}
    event_def_list = []
    # Look up the requested event definition; unsupported values yield a plain start event.
    if start_event_definition in start_event_definitions:
        event_def_list.append(
            BpmnDiagramGraph.add_event_definition_element(start_event_definition, start_event_definitions))
    self.diagram_graph.node[start_event_id][consts.Consts.event_definitions] = event_def_list
    return start_event_id, start_event | [
"\n Adds a StartEvent element to BPMN diagram.\n\n User-defined attributes:\n\n - name\n - parallel_multiple\n - is_interrupting\n - event definition (creates a special type of start event). Supported event definitions -\n * 'message': 'messageEventDefinition', \n * 'timer': 'timerEventDefinition', \n * 'signal': 'signalEventDefinition',\n * 'conditional': 'conditionalEventDefinition', \n * 'escalation': 'escalationEventDefinition'.\n\n :param process_id: string object. ID of parent process,\n :param start_event_name: string object. Name of start event,\n :param start_event_definition: string object. One of the supported event definition names. By default - None,\n :param parallel_multiple: boolean value for attribute \"parallelMultiple\",\n :param is_interrupting: boolean value for attribute \"isInterrupting\",\n :param node_id: string object. ID of node. Default value - None.\n\n :return: a tuple, where first value is startEvent ID, second a reference to created object.\n "
] |
Please provide a description of the function:def add_end_event_to_diagram(self, process_id, end_event_name="", end_event_definition=None, node_id=None):
    end_event_id, end_event = self.add_flow_node_to_diagram(process_id, consts.Consts.end_event, end_event_name,
                                                            node_id)
    end_event_definitions = {"terminate": "terminateEventDefinition", "escalation": "escalationEventDefinition",
                             "message": "messageEventDefinition", "compensate": "compensateEventDefinition",
                             "signal": "signalEventDefinition", "error": "errorEventDefinition"}
    event_def_list = []
    # Look up the requested event definition; unsupported values yield a plain end event.
    if end_event_definition in end_event_definitions:
        event_def_list.append(self.add_event_definition_element(end_event_definition, end_event_definitions))
    self.diagram_graph.node[end_event_id][consts.Consts.event_definitions] = event_def_list
    return end_event_id, end_event | [
"\n Adds an EndEvent element to BPMN diagram.\n User-defined attributes:\n\n - name\n - event definition (creates a special type of end event). Supported event definitions\n * `terminate`: 'terminateEventDefinition', \n * `signal`: 'signalEventDefinition', \n * `error`: 'errorEventDefinition',\n * `escalation`: 'escalationEventDefinition', \n * `message`: 'messageEventDefinition',\n * `compensate`: 'compensateEventDefinition'.\n\n :param process_id: string object. ID of parent process,\n :param end_event_name: string object. Name of end event,\n :param end_event_definition: string object. One of the supported event definition names. By default - None.\n :param node_id: string object. ID of node. Default value - None.\n :return: a tuple, where first value is endEvent ID, second a reference to created object,\n "
] |
Please provide a description of the function:def add_event_definition_element(event_type, event_definitions):
    event_def_id = BpmnDiagramGraph.id_prefix + str(uuid.uuid4())
    event_def = {consts.Consts.id: event_def_id, consts.Consts.definition_type: event_definitions[event_type]}
    return event_def | [
"\n Helper function, that creates event definition element (special type of event) from given parameters.\n\n :param event_type: string object. Short name of required event definition,\n :param event_definitions: dictionary of event definitions. Key is a short name of event definition,\n value is a full name of event definition, as defined in BPMN 2.0 XML Schema.\n "
] |
Please provide a description of the function:def add_gateway_to_diagram(self, process_id, gateway_type, gateway_name="", gateway_direction="Unspecified",
                           node_id=None):
    # Validate the direction before creating the node, so an invalid value does not leave an orphan node.
    if gateway_direction not in ("Unspecified", "Converging", "Diverging", "Mixed"):
        raise bpmn_exception.BpmnPythonError("Invalid value passed as gatewayDirection parameter. Value passed: "
                                             + gateway_direction)
    gateway_id, gateway = self.add_flow_node_to_diagram(process_id, gateway_type, gateway_name, node_id)
    self.diagram_graph.node[gateway_id][consts.Consts.gateway_direction] = gateway_direction
    return gateway_id, gateway | [
"\n Adds a gateway element of a given type to BPMN diagram.\n\n :param process_id: string object. ID of parent process,\n :param gateway_type: string object. Type of gateway to be added.\n :param gateway_name: string object. Name of the gateway,\n :param gateway_direction: string object. Accepted values - \"Unspecified\", \"Converging\", \"Diverging\", \"Mixed\".\n Default value - \"Unspecified\",\n :param node_id: string object. ID of node. Default value - None.\n\n :return: a tuple, where first value is gateway ID, second a reference to created object.\n "
] |
Please provide a description of the function:def add_exclusive_gateway_to_diagram(self, process_id, gateway_name="", gateway_direction="Unspecified",
                                         default=None, node_id=None):
    exclusive_gateway_id, exclusive_gateway = self.add_gateway_to_diagram(process_id,
                                                                          consts.Consts.exclusive_gateway,
                                                                          gateway_name=gateway_name,
                                                                          gateway_direction=gateway_direction,
                                                                          node_id=node_id)
    self.diagram_graph.node[exclusive_gateway_id][consts.Consts.default] = default
    return exclusive_gateway_id, exclusive_gateway | [
"\n Adds an exclusiveGateway element to BPMN diagram.\n\n :param process_id: string object. ID of parent process,\n :param gateway_name: string object. Name of exclusive gateway,\n :param gateway_direction: string object. Accepted values - \"Unspecified\", \"Converging\", \"Diverging\", \"Mixed\".\n Default value - \"Unspecified\".\n :param default: string object. ID of flow node, target of gateway default path. Default value - None,\n :param node_id: string object. ID of node. Default value - None.\n \n :return: a tuple, where first value is exclusiveGateway ID, second a reference to created object.\n "
] |
Please provide a description of the function:def add_inclusive_gateway_to_diagram(self, process_id, gateway_name="", gateway_direction="Unspecified",
                                         default=None, node_id=None):
    inclusive_gateway_id, inclusive_gateway = self.add_gateway_to_diagram(process_id,
                                                                          consts.Consts.inclusive_gateway,
                                                                          gateway_name=gateway_name,
                                                                          gateway_direction=gateway_direction,
                                                                          node_id=node_id)
    self.diagram_graph.node[inclusive_gateway_id][consts.Consts.default] = default
    return inclusive_gateway_id, inclusive_gateway | [
"\n Adds an inclusiveGateway element to BPMN diagram.\n\n :param process_id: string object. ID of parent process,\n :param gateway_name: string object. Name of inclusive gateway,\n :param gateway_direction: string object. Accepted values - \"Unspecified\", \"Converging\", \"Diverging\", \"Mixed\".\n Default value - \"Unspecified\",\n :param default: string object. ID of flow node, target of gateway default path. Default value - None,\n :param node_id: string object. ID of node. Default value - None.\n\n :return: a tuple, where first value is inclusiveGateway ID, second a reference to created object.\n "
] |
Please provide a description of the function:def add_parallel_gateway_to_diagram(self, process_id, gateway_name="", gateway_direction="Unspecified",
                                        node_id=None):
    parallel_gateway_id, parallel_gateway = self.add_gateway_to_diagram(process_id,
                                                                        consts.Consts.parallel_gateway,
                                                                        gateway_name=gateway_name,
                                                                        gateway_direction=gateway_direction,
                                                                        node_id=node_id)
    return parallel_gateway_id, parallel_gateway | [
"\n Adds a parallelGateway element to BPMN diagram.\n\n :param process_id: string object. ID of parent process,\n :param gateway_name: string object. Name of parallel gateway,\n :param gateway_direction: string object. Accepted values - \"Unspecified\", \"Converging\", \"Diverging\", \"Mixed\".\n Default value - \"Unspecified\",\n :param node_id: string object. ID of node. Default value - None.\n\n :return: a tuple, where first value is parallelGateway ID, second a reference to created object.\n "
] |
Please provide a description of the function:def add_sequence_flow_to_diagram(self, process_id, source_ref_id, target_ref_id, sequence_flow_name=""):
    sequence_flow_id = BpmnDiagramGraph.id_prefix + str(uuid.uuid4())
    self.sequence_flows[sequence_flow_id] = {consts.Consts.name: sequence_flow_name,
                                             consts.Consts.source_ref: source_ref_id,
                                             consts.Consts.target_ref: target_ref_id}
    self.diagram_graph.add_edge(source_ref_id, target_ref_id)
    flow = self.diagram_graph[source_ref_id][target_ref_id]
    flow[consts.Consts.id] = sequence_flow_id
    flow[consts.Consts.name] = sequence_flow_name
    flow[consts.Consts.process] = process_id
    flow[consts.Consts.source_ref] = source_ref_id
    flow[consts.Consts.target_ref] = target_ref_id
    source_node = self.diagram_graph.node[source_ref_id]
    target_node = self.diagram_graph.node[target_ref_id]
    flow[consts.Consts.waypoints] = \
        [(source_node[consts.Consts.x], source_node[consts.Consts.y]),
         (target_node[consts.Consts.x], target_node[consts.Consts.y])]
    # add target node (target_ref_id) as outgoing node from source node (source_ref_id)
    source_node[consts.Consts.outgoing_flow].append(sequence_flow_id)
    # add source node (source_ref_id) as incoming node to target node (target_ref_id)
    target_node[consts.Consts.incoming_flow].append(sequence_flow_id)
    return sequence_flow_id, flow | [
"\n Adds a SequenceFlow element to BPMN diagram.\n Requires that user passes a sourceRef and targetRef as parameters.\n User-defined attributes:\n\n - name\n\n :param process_id: string object. ID of parent process,\n :param source_ref_id: string object. ID of source node,\n :param target_ref_id: string object. ID of target node,\n :param sequence_flow_name: string object. Name of sequence flow.\n :return: a tuple, where first value is sequenceFlow ID, second a reference to created object.\n "
] |
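The builder methods above compose naturally. A minimal end-to-end sketch (assuming `BpmnDiagramGraph` is imported from bpmn_python; names are illustrative) that wires a start event, a task and an end event with sequence flows:
bpmn_graph = BpmnDiagramGraph()
bpmn_graph.create_new_diagram_graph(diagram_name="order_process")
process_id = bpmn_graph.add_process_to_diagram()
start_id, _ = bpmn_graph.add_start_event_to_diagram(process_id, start_event_name="start")
task_id, _ = bpmn_graph.add_task_to_diagram(process_id, task_name="handle order")
end_id, _ = bpmn_graph.add_end_event_to_diagram(process_id, end_event_name="end")
bpmn_graph.add_sequence_flow_to_diagram(process_id, start_id, task_id)
bpmn_graph.add_sequence_flow_to_diagram(process_id, task_id, end_id)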
Please provide a description of the function:def get_nodes_positions(self):
    nodes = self.get_nodes()
    output = {}
    for node in nodes:
        output[node[0]] = (float(node[1][consts.Consts.x]), float(node[1][consts.Consts.y]))
    return output | [
"\n Getter method for nodes positions.\n\n :return: A dictionary with nodes as keys and positions as values\n "
] |
Please provide a description of the function:def set_condition(self, value):
    if value is None or not isinstance(value, str):
        raise TypeError("Condition is required and must be set to a String")
    else:
        self.__condition = value | [
"\n Setter for 'condition' field.\n :param value - a new value of 'condition' field. Required field. Must be a String.\n "
] |
Please provide a description of the function:def os_walk_pre_35(top, topdown=True, onerror=None, followlinks=False):
    islink, join, isdir = os.path.islink, os.path.join, os.path.isdir
    try:
        names = os.listdir(top)
    except OSError as err:
        if onerror is not None:
            onerror(err)
        return
    dirs, nondirs = [], []
    for name in names:
        if isdir(join(top, name)):
            dirs.append(name)
        else:
            nondirs.append(name)
    if topdown:
        yield top, dirs, nondirs
    for name in dirs:
        new_path = join(top, name)
        if followlinks or not islink(new_path):
            for x in os_walk_pre_35(new_path, topdown, onerror, followlinks):
                yield x
    if not topdown:
        yield top, dirs, nondirs | [
"Pre Python 3.5 implementation of os.walk() that doesn't use scandir."
] |
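A usage sketch (hypothetical directory; the generator yields (dirpath, dirnames, filenames) triples exactly like os.walk):
import os
# Count all files reachable from the current directory.
total_files = sum(len(files) for _, _, files in os_walk_pre_35('.'))
print(total_files)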
Please provide a description of the function:def create_tree(path, depth=DEPTH):
    os.mkdir(path)
    for i in range(NUM_FILES):
        filename = os.path.join(path, 'file{0:03}.txt'.format(i))
        with open(filename, 'wb') as f:
            f.write(b'foo')
    if depth <= 1:
        return
    for i in range(NUM_DIRS):
        dirname = os.path.join(path, 'dir{0:03}'.format(i))
        create_tree(dirname, depth - 1) | [
"Create a directory tree at path with given depth, and NUM_DIRS and\n NUM_FILES at each level.\n "
] |
Please provide a description of the function:def get_tree_size(path):
    size = 0
    try:
        for entry in scandir.scandir(path):
            if entry.is_symlink():
                pass
            elif entry.is_dir():
                size += get_tree_size(os.path.join(path, entry.name))
            else:
                size += entry.stat().st_size
    except OSError:
        pass
    return size | [
"Return total size of all files in directory tree at path."
] |
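A small sketch tying the two helpers together (assumes the module-level NUM_FILES, NUM_DIRS and DEPTH constants used above; 'benchtree' is an illustrative path):
create_tree('benchtree', depth=2)
print(get_tree_size('benchtree'))  # each directory holds NUM_FILES 3-byte files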
Please provide a description of the function:def unfold(tensor, mode):
    return np.moveaxis(tensor, mode, 0).reshape((tensor.shape[mode], -1)) | [
"Returns the mode-`mode` unfolding of `tensor`.\n\n Parameters\n ----------\n tensor : ndarray\n mode : int\n\n Returns\n -------\n ndarray\n unfolded_tensor of shape ``(tensor.shape[mode], -1)``\n\n Author\n ------\n Jean Kossaifi <https://github.com/tensorly>\n "
] |
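A worked example of the unfolding shape: for a (2, 3, 4) tensor, the mode-1 unfolding has 3 rows (the size of mode 1) and 2*4 = 8 columns:
import numpy as np
X = np.arange(24).reshape(2, 3, 4)
print(unfold(X, 1).shape)  # (3, 8)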
Please provide a description of the function:def khatri_rao(matrices):
    n_columns = matrices[0].shape[1]
    n_factors = len(matrices)
    start = ord('a')
    common_dim = 'z'
    target = ''.join(chr(start + i) for i in range(n_factors))
    source = ','.join(i + common_dim for i in target)
    operation = source + '->' + target + common_dim
    return np.einsum(operation, *matrices).reshape((-1, n_columns)) | [
"Khatri-Rao product of a list of matrices.\n\n Parameters\n ----------\n matrices : list of ndarray\n\n Returns\n -------\n khatri_rao_product: matrix of shape ``(prod(n_i), m)``\n where ``prod(n_i) = prod([m.shape[0] for m in matrices])``\n i.e. the product of the number of rows of all the matrices in the\n product.\n\n Author\n ------\n Jean Kossaifi <https://github.com/tensorly>\n "
] |
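A worked example: the Khatri-Rao product of (4, 2) and (5, 2) factor matrices is a (20, 2) matrix whose columns are column-wise Kronecker products:
import numpy as np
A, B = np.random.rand(4, 2), np.random.rand(5, 2)
K = khatri_rao([A, B])
print(K.shape)  # (20, 2)
assert np.allclose(K[:, 0], np.kron(A[:, 0], B[:, 0]))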
Please provide a description of the function:def soft_cluster_factor(factor):
    # copy factor of interest
    f = np.copy(factor)
    # cluster based on score of maximum absolute value
    cluster_ids = np.argmax(np.abs(f), axis=1)
    scores = f[range(f.shape[0]), cluster_ids]
    # resort within each cluster
    perm = []
    for cluster in np.unique(cluster_ids):
        idx = np.where(cluster_ids == cluster)[0]
        perm += list(idx[np.argsort(scores[idx])][::-1])
    return cluster_ids, perm | [
"Returns soft-clustering of data based on CP decomposition results.\n\n Parameters\n ----------\n factor : ndarray, N x R matrix of nonnegative data\n Datapoints are held in rows, features are held in columns\n\n Returns\n -------\n cluster_ids : ndarray, vector of N integers in range(0, R)\n List of soft cluster assignments for each row of data matrix\n perm : ndarray, vector of N integers\n Permutation / ordering of the rows of data induced by the soft\n clustering.\n "
] |
Please provide a description of the function:def tsp_linearize(data, niter=1000, metric='euclidean', **kwargs):
    # Compute pairwise distances between all datapoints
    N = data.shape[0]
    D = scipy.spatial.distance.pdist(data, metric=metric, **kwargs)
    # To solve the travelling salesperson problem with no return to the
    # original node we add a dummy node that has distance zero connections
    # to all other nodes. The dummy node is then removed after we've converged
    # to a solution.
    dist = np.zeros((N+1, N+1))
    dist[:N, :N] = scipy.spatial.distance.squareform(D)
    # solve TSP (pass 'niter' through; the original call dropped it)
    perm, cost_hist = _solve_tsp(dist, niter)
    # remove dummy node at position i
    i = np.argwhere(perm == N).ravel()[0]
    perm = np.hstack((perm[(i+1):], perm[:i]))
    return perm | [
"Sorts a matrix dataset to (approximately) solve the traveling\n salesperson problem. The matrix can be re-sorted so that sequential rows\n represent datapoints that are close to each other based on some\n user-defined distance metric. Uses 2-opt local search algorithm.\n\n Args\n ----\n data : ndarray, N x R matrix of data\n Datapoints are held in rows, features are held in columns\n\n Returns\n -------\n perm : ndarray, vector of N integers\n Permutation / ordering of the rows of data that approximately\n solves the travelling salesperson problem.\n "
] |
Please provide a description of the function:def hclust_linearize(U):
    from scipy.cluster import hierarchy
    Z = hierarchy.ward(U)
    return hierarchy.leaves_list(hierarchy.optimal_leaf_ordering(Z, U)) | [
"Sorts the rows of a matrix by hierarchical clustering.\n\n Parameters:\n U (ndarray) : matrix of data\n\n Returns:\n prm (ndarray) : permutation of the rows\n "
] |
Please provide a description of the function:def reverse_segment(path, n1, n2):
    q = path.copy()
    if n2 > n1:
        q[n1:(n2+1)] = path[n1:(n2+1)][::-1]
        return q
    else:
        seg = np.hstack((path[n1:], path[:(n2+1)]))[::-1]
        brk = len(q) - n1
        q[n1:] = seg[:brk]
        q[:(n2+1)] = seg[brk:]
        return q | [
"Reverse the nodes between n1 and n2.\n "
] |
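A worked example, including the wrap-around branch:
import numpy as np
path = np.array([0, 1, 2, 3, 4])
print(reverse_segment(path, 1, 3))  # [0 3 2 1 4]
print(reverse_segment(path, 3, 1))  # [4 3 2 1 0], segment [3, 4, 0, 1] reversed across the end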
Please provide a description of the function:def _solve_tsp(dist, niter):
    # number of nodes
    N = dist.shape[0]
    # tsp path for quick calculation of cost
    ii = np.arange(N)
    jj = np.hstack((np.arange(1, N), 0))
    # for each node, cache a sorted list of all other nodes in order of
    # increasing distance.
    dsort = [np.argsort(d) for d in dist]
    dsort = [d[d != i] for i, d in enumerate(dsort)]
    # randomly initialize path through graph
    path = np.random.permutation(N)
    idx = np.argsort(path)
    cost = np.sum(dist[path[ii], path[jj]])
    # keep track of objective function over time
    cost_hist = [cost]
    # optimization loop
    node = 0
    itercount = 0
    n = 0
    while n < N and itercount < niter:
        # count iterations
        itercount += 1
        # we'll try breaking the connection i -> j
        i = path[node]
        j = path[(node+1) % N]
        # We are breaking i -> j so we can remove the cost of that connection.
        c = cost - dist[i, j]
        # Search over nodes k that are closer to j than i.
        for k in dsort[j]:
            # Can safely continue if dist[i,j] < dist[k,j] for the remaining k.
            if k == i:
                n += 1
                break
            # Break connection k -> p.
            # Add connection j -> p.
            # Add connection i -> k.
            p = path[(idx[k]+1) % N]
            new_cost = c - dist[k, p] + dist[j, p] + dist[i, k]
            # If this swap improves the cost, implement it and move to next i.
            if new_cost < cost:
                path = reverse_segment(path, idx[j], idx[k])
                idx = np.argsort(path)
                cost = new_cost
                # Restart from the beginning of the graph.
                cost_hist.append(cost)
                n = 0
                break
        # move to next node
        node = (node + 1) % N
    return path, cost_hist | [
"Solve travelling salesperson problem (TSP) by two-opt swapping.\n\n Params\n ------\n dist (ndarray) : distance matrix\n niter (int) : maximum number of iterations to perform\n\n Returns\n -------\n path (ndarray) : permutation of nodes in graph (rows of dist matrix)\n cost_hist (list) : objective function values over the course of optimization\n "
] |
Please provide a description of the function:def full(self):
    # Compute tensor unfolding along first mode
    unf = sci.dot(self.factors[0], khatri_rao(self.factors[1:]).T)
    # Inverse unfolding along first mode
    return sci.reshape(unf, self.shape) | [
"Converts KTensor to a dense ndarray."
] |
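A quick sketch of reconstruction (assumes randn_ktensor from this package, defined further down):
U = randn_ktensor((5, 6, 7), rank=2)
X = U.full()
print(X.shape)  # (5, 6, 7)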
Please provide a description of the function:def rebalance(self):
    # Compute norms along columns for each factor matrix
    norms = [sci.linalg.norm(f, axis=0) for f in self.factors]
    # Multiply norms across all modes
    lam = sci.multiply.reduce(norms) ** (1/self.ndim)
    # Update factors
    self.factors = [f * (lam / fn) for f, fn in zip(self.factors, norms)]
    return self | [
"Rescales factors across modes so that all norms match.\n "
] |
Please provide a description of the function:def permute(self, idx):
    # Check that input is a true permutation
    if set(idx) != set(range(self.rank)):
        raise ValueError('Invalid permutation specified.')
    # Update factors
    self.factors = [f[:, idx] for f in self.factors]
    return self.factors | [
"Permutes the columns of the factor matrices inplace\n "
] |
Please provide a description of the function:def kruskal_align(U, V, permute_U=False, permute_V=False):
    # Compute similarity matrices.
    unrm = [f / np.linalg.norm(f, axis=0) for f in U.factors]
    vnrm = [f / np.linalg.norm(f, axis=0) for f in V.factors]
    sim_matrices = [np.dot(u.T, v) for u, v in zip(unrm, vnrm)]
    cost = 1 - np.mean(np.abs(sim_matrices), axis=0)
    # Solve matching problem via Hungarian algorithm.
    indices = Munkres().compute(cost.copy())
    prmU, prmV = zip(*indices)
    # Compute mean factor similarity given the optimal matching.
    similarity = np.mean(1 - cost[prmU, prmV])
    # If U and V are of different ranks, identify unmatched factors.
    unmatched_U = list(set(range(U.rank)) - set(prmU))
    unmatched_V = list(set(range(V.rank)) - set(prmV))
    # If permuting both U and V, order factors from most to least similar.
    if permute_U and permute_V:
        idx = np.argsort(cost[prmU, prmV])
    # If permute_U is False, then order the factors such that the ordering
    # for U is unchanged.
    elif permute_V:
        idx = np.argsort(prmU)
    # If permute_V is False, then order the factors such that the ordering
    # for V is unchanged.
    elif permute_U:
        idx = np.argsort(prmV)
    # If permute_U and permute_V are both False, then we are done and can
    # simply return the similarity.
    else:
        return similarity
    # Re-order the factor permutations.
    prmU = [prmU[i] for i in idx]
    prmV = [prmV[i] for i in idx]
    # Permute the factors.
    if permute_U:
        U.permute(prmU)
    if permute_V:
        V.permute(prmV)
    # Flip the signs of factors.
    flips = np.sign([F[prmU, prmV] for F in sim_matrices])
    flips[0] *= np.prod(flips, axis=0)  # always flip an even number of factors
    if permute_U:
        for i, f in enumerate(flips):
            U.factors[i] *= f
    elif permute_V:
        for i, f in enumerate(flips):
            V.factors[i] *= f
    # Return the similarity score
    return similarity | [
"Aligns two KTensors and returns a similarity score.\n\n Parameters\n ----------\n U : KTensor\n First kruskal tensor to align.\n V : KTensor\n Second kruskal tensor to align.\n permute_U : bool\n If True, modifies 'U' to align the KTensors (default is False).\n permute_V : bool\n If True, modifies 'V' to align the KTensors (default is False).\n\n Notes\n -----\n If both `permute_U` and `permute_V` are both set to True, then the\n factors are ordered from most to least similar. If only one is\n True then the factors on the modified KTensor are re-ordered to\n match the factors in the un-aligned KTensor.\n\n Returns\n -------\n similarity : float\n Similarity score between zero and one.\n "
] |
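A usage sketch: fit the same tensor twice from different seeds and align the second factorization to the first (assumes cp_als and randn_ktensor from this package; a score near 1.0 indicates consistent recovery):
X = randn_ktensor((20, 20, 20), rank=3, random_state=0).full()
U = cp_als(X, rank=3, random_state=1).factors
V = cp_als(X, rank=3, random_state=2).factors
score = kruskal_align(U, V, permute_V=True)  # reorders V's factors in place
print(score)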
Please provide a description of the function:def plot_objective(ensemble, partition='train', ax=None, jitter=0.1,
                   scatter_kw=dict(), line_kw=dict()):
    if ax is None:
        ax = plt.gca()
    if partition == 'train':
        pass
    elif partition == 'test':
        raise NotImplementedError('Cross-validation is on the TODO list.')
    else:
        raise ValueError("partition must be 'train' or 'test'.")
    # compile statistics for plotting
    x, obj, min_obj = [], [], []
    for rank in sorted(ensemble.results):
        # reconstruction errors for rank-r models
        o = ensemble.objectives(rank)
        obj.extend(o)
        x.extend(np.full(len(o), rank))
        min_obj.append(min(o))
    # add horizontal jitter
    ux = np.unique(x)
    x = np.array(x) + (np.random.rand(len(x)) - 0.5) * jitter
    # make plot
    ax.scatter(x, obj, **scatter_kw)
    ax.plot(ux, min_obj, **line_kw)
    ax.set_xlabel('model rank')
    ax.set_ylabel('objective')
    return ax | [
"Plots objective function as a function of model rank.\n\n Parameters\n ----------\n ensemble : Ensemble object\n holds optimization results across a range of model ranks\n partition : string, one of: {'train', 'test'}\n specifies whether to plot the objective function on the training\n data or the held-out test set.\n ax : matplotlib axis (optional)\n axis to plot on (defaults to current axis object)\n jitter : float (optional)\n amount of horizontal jitter added to scatterpoints (default=0.1)\n scatter_kw : dict (optional)\n keyword arguments for styling the scatterpoints\n line_kw : dict (optional)\n keyword arguments for styling the line\n "
] |
Please provide a description of the function:def plot_similarity(ensemble, ax=None, jitter=0.1,
                    scatter_kw=dict(), line_kw=dict()):
    if ax is None:
        ax = plt.gca()
    # compile statistics for plotting
    x, sim, mean_sim = [], [], []
    for rank in sorted(ensemble.results):
        # model similarity scores for rank-r models
        s = ensemble.similarities(rank)[1:]
        sim.extend(s)
        x.extend(np.full(len(s), rank))
        mean_sim.append(np.mean(s))
    # add horizontal jitter
    ux = np.unique(x)
    x = np.array(x) + (np.random.rand(len(x)) - 0.5) * jitter
    # make plot
    ax.scatter(x, sim, **scatter_kw)
    ax.plot(ux, mean_sim, **line_kw)
    ax.set_xlabel('model rank')
    ax.set_ylabel('model similarity')
    ax.set_ylim([0, 1.1])
    return ax | [
"Plots similarity across optimization runs as a function of model rank.\n\n Parameters\n ----------\n ensemble : Ensemble object\n holds optimization results across a range of model ranks\n ax : matplotlib axis (optional)\n axis to plot on (defaults to current axis object)\n jitter : float (optional)\n amount of horizontal jitter added to scatterpoints (default=0.1)\n scatter_kw : dict (optional)\n keyword arguments for styling the scatterpoints\n line_kw : dict (optional)\n keyword arguments for styling the line\n\n References\n ----------\n Ulrike von Luxburg (2010). Clustering Stability: An Overview.\n Foundations and Trends in Machine Learning.\n https://arxiv.org/abs/1007.1075\n\n "
] |
Please provide a description of the function:def plot_factors(U, plots='line', fig=None, axes=None, scatter_kw=dict(),
                 line_kw=dict(), bar_kw=dict(), **kwargs):
    # ~~~~~~~~~~~~~
    # PARSE OPTIONS
    # ~~~~~~~~~~~~~
    kwargs.setdefault('figsize', (8, U.rank))
    # parse optional inputs
    plots = _broadcast_arg(U, plots, str, 'plots')
    bar_kw = _broadcast_arg(U, bar_kw, dict, 'bar_kw')
    line_kw = _broadcast_arg(U, line_kw, dict, 'line_kw')
    scatter_kw = _broadcast_arg(U, scatter_kw, dict, 'scatter_kw')
    # default scatterplot options
    for sckw in scatter_kw:
        sckw.setdefault('edgecolor', 'none')
        sckw.setdefault('s', 10)
    # ~~~~~~~~~~~~~~
    # SETUP SUBPLOTS
    # ~~~~~~~~~~~~~~
    if fig is None and axes is None:
        fig, axes = plt.subplots(U.rank, U.ndim, **kwargs)
        # make sure axes is a 2d-array
        if U.rank == 1:
            axes = axes[None, :]
    # if axes are passed in, identify figure
    elif fig is None:
        fig = axes[0, 0].get_figure()
    # if figure is passed, identify axes
    else:
        axes = np.array(fig.get_axes(), dtype=object).reshape(U.rank, U.ndim)
    # main loop, plot each factor
    plot_obj = np.empty((U.rank, U.ndim), dtype=object)
    for r in range(U.rank):
        for i, f in enumerate(U):
            # start plots at 1 instead of zero
            x = np.arange(1, f.shape[0] + 1)
            # determine type of plot
            if plots[i] == 'bar':
                plot_obj[r, i] = axes[r, i].bar(x, f[:, r], **bar_kw[i])
                axes[r, i].set_xlim(0, f.shape[0] + 1)
            elif plots[i] == 'scatter':
                plot_obj[r, i] = axes[r, i].scatter(x, f[:, r], **scatter_kw[i])
                axes[r, i].set_xlim(0, f.shape[0])
            elif plots[i] == 'line':
                plot_obj[r, i] = axes[r, i].plot(f[:, r], '-', **line_kw[i])
                axes[r, i].set_xlim(0, f.shape[0])
            else:
                raise ValueError('invalid plot type')
            # format axes
            axes[r, i].locator_params(nbins=4)
            axes[r, i].spines['top'].set_visible(False)
            axes[r, i].spines['right'].set_visible(False)
            axes[r, i].xaxis.set_tick_params(direction='out')
            axes[r, i].yaxis.set_tick_params(direction='out')
            axes[r, i].yaxis.set_ticks_position('left')
            axes[r, i].xaxis.set_ticks_position('bottom')
            # remove xticks on all but bottom row
            if r != U.rank - 1:
                plt.setp(axes[r, i].get_xticklabels(), visible=False)
    # link y-axes within columns
    for i in range(U.ndim):
        yl = [a.get_ylim() for a in axes[:, i]]
        y0, y1 = min([y[0] for y in yl]), max([y[1] for y in yl])
        [a.set_ylim((y0, y1)) for a in axes[:, i]]
    # format y-ticks
    for r in range(U.rank):
        for i in range(U.ndim):
            # only two labels
            ymin, ymax = np.round(axes[r, i].get_ylim(), 2)
            axes[r, i].set_ylim((ymin, ymax))
            # remove decimals from labels
            if ymin.is_integer():
                ymin = int(ymin)
            if ymax.is_integer():
                ymax = int(ymax)
            # update plot
            axes[r, i].set_yticks([ymin, ymax])
    plt.tight_layout()
    return fig, axes, plot_obj | [
"\n Plots a KTensor.\n\n Note: Each keyword option is broadcast to all modes of the KTensor. For\n example, if `U` is a 3rd-order tensor (i.e. `U.ndim == 3`) then\n `plot_factors(U, plots=['line','bar','scatter'])` plots all factors for the\n first mode as a line plot, the second as a bar plot, and the third mode as\n a scatterplot. But, thanks to broadcasting semantics,\n `plot_factors(U, plots='line')` produces line plots for each mode.\n\n Parameters\n ----------\n U : KTensor\n Kruskal tensor to be plotted.\n\n plots : str or list\n One of {'bar','line','scatter'} to specify the type of plot for each\n factor. The default is 'line'.\n fig : matplotlib Figure object\n If provided, add plots to the specified figure. The figure must have a\n sufficient number of axes objects.\n axes : 2d numpy array of matplotlib Axes objects\n If provided, add plots to the specified figure.\n scatter_kw : dict or sequence of dicts\n Keyword arguments provided to scatterplots. If a single dict is\n provided, these options are broadcasted to all modes.\n line_kw : dict or sequence of dicts\n Keyword arguments provided to line plots. If a single dict is provided,\n these options are broadcasted to all modes.\n bar_kw : dict or sequence of dicts\n Keyword arguments provided to bar plots. If a single dict is provided,\n these options are broadcasted to all modes.\n **kwargs : dict\n Additional keyword parameters are passed to the `subplots(...)`\n function to specify options such as `figsize` and `gridspec_kw`. See\n `matplotlib.pyplot.subplots(...)` documentation for more info.\n "
] |
Please provide a description of the function:def _broadcast_arg(U, arg, argtype, name):
    # if input is not iterable, broadcast it all dimensions of the tensor
    if arg is None or isinstance(arg, argtype):
        return [arg for _ in range(U.ndim)]
    # check if iterable input is valid
    elif np.iterable(arg):
        if len(arg) != U.ndim:
            raise ValueError('Parameter {} was specified as a sequence of '
                             'incorrect length. The length must match the '
                             'number of tensor dimensions '
                             '(U.ndim={})'.format(name, U.ndim))
        elif not all([isinstance(a, argtype) for a in arg]):
            raise TypeError('Parameter {} specified as a sequence of '
                            'incorrect type. '
                            'Expected {}.'.format(name, argtype))
        else:
            return arg
    # input is not iterable and is not the correct type.
    else:
        raise TypeError('Parameter {} specified as a {}.'
                        ' Expected {}.'.format(name, type(arg), argtype)) | [
"Broadcasts plotting option `arg` to all factors.\n\n Args:\n U : KTensor\n arg : argument provided by the user\n argtype : expected type for arg\n name : name of the variable, used for error handling\n\n Returns:\n iterable version of arg of length U.ndim\n "
] |
Please provide a description of the function:def _check_cpd_inputs(X, rank):
    if X.ndim < 3:
        raise ValueError("Array with X.ndim > 2 expected.")
    if rank <= 0 or not isinstance(rank, int):
        raise ValueError("Rank is invalid.") | [
"Checks that inputs to optimization function are appropriate.\n\n Parameters\n ----------\n X : ndarray\n Tensor used for fitting CP decomposition.\n rank : int\n Rank of low rank decomposition.\n\n Raises\n ------\n ValueError: If inputs are not suited for CP decomposition.\n "
] |
Please provide a description of the function:def _get_initial_ktensor(init, X, rank, random_state, scale_norm=True):
    normX = linalg.norm(X) if scale_norm else None
    if init == 'randn':
        # TODO - match the norm of the initialization to the norm of X.
        U = randn_ktensor(X.shape, rank, norm=normX, random_state=random_state)
    elif init == 'rand':
        # TODO - match the norm of the initialization to the norm of X.
        U = rand_ktensor(X.shape, rank, norm=normX, random_state=random_state)
    elif isinstance(init, KTensor):
        U = init.copy()
    else:
        raise ValueError("Expected 'init' to either be a KTensor or a string "
                         "specifying how to initialize optimization. Valid "
                         "strings are ('randn', 'rand').")
    return U, normX | [
"\n Parameters\n ----------\n init : str\n Specifies type of initializations ('randn', 'rand')\n X : ndarray\n Tensor that the decomposition is fit to.\n rank : int\n Rank of decomposition\n random_state : RandomState or int\n Specifies seed for random number generator\n scale_norm : bool\n If True, norm is scaled to match X (default: True)\n\n Returns\n -------\n U : KTensor\n Initial factor matrices used optimization.\n normX : float\n Frobenius norm of tensor data.\n "
] |
Please provide a description of the function:def still_optimizing(self):
    # Check if we need to give up on optimizing.
    if (self.iterations > self.max_iter) or (self.time_elapsed() > self.max_time):
        return False
    # Always optimize for at least 'min_iter' iterations.
    elif not hasattr(self, 'improvement') or (self.iterations < self.min_iter):
        return True
    # Check convergence.
    else:
        self.converged = self.improvement < self.tol
        return not self.converged | [
"True unless converged or maximum iterations/time exceeded."
] |
Please provide a description of the function:def _check_random_state(random_state):
    if random_state is None or isinstance(random_state, int):
        return sci.random.RandomState(random_state)
    elif isinstance(random_state, sci.random.RandomState):
        return random_state
    else:
        raise TypeError('Seed should be None, int or np.random.RandomState') | [
"Checks and processes user input for seeding random numbers.\n\n Parameters\n ----------\n random_state : int, RandomState instance or None\n If int, a RandomState instance is created with this integer seed.\n If RandomState instance, random_state is returned;\n If None, a RandomState instance is created with arbitrary seed.\n\n Returns\n -------\n scipy.random.RandomState instance\n\n Raises\n ------\n TypeError\n If ``random_state`` is not appropriately set.\n "
] |
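A brief sketch of the three accepted seed forms:
rs1 = _check_random_state(None)  # fresh RandomState with arbitrary seed
rs2 = _check_random_state(42)    # RandomState seeded with 42
rs3 = _check_random_state(rs2)   # existing instance is passed through
assert rs3 is rs2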
Please provide a description of the function:def randn_ktensor(shape, rank, norm=None, random_state=None):
    # Check input.
    rns = _check_random_state(random_state)
    # Draw low-rank factor matrices with i.i.d. Gaussian elements.
    factors = KTensor([rns.standard_normal((i, rank)) for i in shape])
    return _rescale_tensor(factors, norm) | [
"\n Generates a random N-way tensor with rank R, where the entries are\n drawn from the standard normal distribution.\n\n Parameters\n ----------\n shape : tuple\n shape of the tensor\n\n rank : integer\n rank of the tensor\n\n norm : float or None, optional (defaults: None)\n If not None, the factor matrices are rescaled so that the Frobenius\n norm of the returned tensor is equal to ``norm``.\n\n random_state : integer, RandomState instance or None, optional (default ``None``)\n If integer, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used by np.random.\n\n\n Returns\n -------\n X : (I_1, ..., I_N) array_like\n N-way tensor with rank R.\n\n Example\n -------\n >>> # Create a rank-2 tensor of dimension 5x5x5:\n >>> import tensortools as tt\n >>> X = tt.randn_ktensor((5,5,5), rank=2)\n\n "
] |
Please provide a description of the function:def rand_ktensor(shape, rank, norm=None, random_state=None):
    # Check input.
    rns = _check_random_state(random_state)
    # Randomize low-rank factor matrices with i.i.d. uniform random elements.
    factors = KTensor([rns.uniform(0.0, 1.0, size=(i, rank)) for i in shape])
    return _rescale_tensor(factors, norm) | [
"\n Generates a random N-way tensor with rank R, where the entries are\n drawn from the standard uniform distribution in the interval [0.0, 1.0].\n\n Parameters\n ----------\n shape : tuple\n shape of the tensor\n\n rank : integer\n rank of the tensor\n\n norm : float or None, optional (defaults: None)\n If not None, the factor matrices are rescaled so that the Frobenius\n norm of the returned tensor is equal to ``norm``.\n\n random_state : integer, RandomState instance or None, optional (default ``None``)\n If integer, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used by np.random.\n\n\n Returns\n -------\n X : (I_1, ..., I_N) array_like\n N-way tensor with rank R.\n\n Example\n -------\n >>> # Create a rank-2 tensor of dimension 5x5x5:\n >>> import tensortools as tt\n >>> X = tt.rand_ktensor((5,5,5), rank=2)\n\n "
] |
Please provide a description of the function:def mcp_als(X, rank, mask, random_state=None, init='randn', **options):
    # Check inputs.
    optim_utils._check_cpd_inputs(X, rank)
    # Initialize problem.
    U, _ = optim_utils._get_initial_ktensor(init, X, rank, random_state, scale_norm=False)
    result = FitResult(U, 'MCP_ALS', **options)
    normX = np.linalg.norm(X * mask)
    # Main optimization loop.
    while result.still_optimizing:
        # Iterate over each tensor mode.
        for n in range(X.ndim):
            # i) Normalize factors to prevent singularities.
            U.rebalance()
            # ii) Unfold data and mask along the nth mode.
            unf = unfold(X, n)  # i_n x N
            m = unfold(mask, n)  # i_n x N
            # iii) Form Khatri-Rao product of factor matrices.
            components = [U[j] for j in range(X.ndim) if j != n]
            krt = khatri_rao(components).T  # N x r
            # iv) Broadcasted solve of linear systems.
            # Left hand side of equations, R x R x X.shape[n]
            # Right hand side of equations, X.shape[n] x R x 1
            lhs_stack = np.matmul(m[:, None, :] * krt[None, :, :], krt.T[None, :, :])
            rhs_stack = np.dot(unf * m, krt.T)[:, :, None]
            # v) Update factor.
            U[n] = np.linalg.solve(lhs_stack, rhs_stack).reshape(X.shape[n], rank)
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Update the optimization result, checks for convergence.
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Compute objective function on the observed entries only.
        obj = linalg.norm(mask * (U.full() - X)) / normX
        # Update result
        result.update(obj)
    # Finalize and return the optimization result.
    return result.finalize() | [
"Fits CP Decomposition with missing data using Alternating Least Squares (ALS).\n\n Parameters\n ----------\n X : (I_1, ..., I_N) array_like\n A tensor with ``X.ndim >= 3``.\n\n rank : integer\n The `rank` sets the number of components to be computed.\n\n mask : (I_1, ..., I_N) array_like\n A binary tensor with the same shape as ``X``. All entries equal to zero\n correspond to held out or missing data in ``X``. All entries equal to\n one correspond to observed entries in ``X`` and the decomposition is\n fit to these datapoints.\n\n random_state : integer, ``RandomState``, or ``None``, optional (default ``None``)\n If integer, sets the seed of the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, use the RandomState instance used by ``numpy.random``.\n\n init : str, or KTensor, optional (default ``'randn'``).\n Specifies initial guess for KTensor factor matrices.\n If ``'randn'``, Gaussian random numbers are used to initialize.\n If ``'rand'``, uniform random numbers are used to initialize.\n If KTensor instance, a copy is made to initialize the optimization.\n\n options : dict, specifying fitting options.\n\n tol : float, optional (default ``tol=1E-5``)\n Stopping tolerance for reconstruction error.\n\n max_iter : integer, optional (default ``max_iter = 500``)\n Maximum number of iterations to perform before exiting.\n\n min_iter : integer, optional (default ``min_iter = 1``)\n Minimum number of iterations to perform before exiting.\n\n max_time : integer, optional (default ``max_time = np.inf``)\n Maximum computational time before exiting.\n\n verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)\n Display progress.\n\n\n Returns\n -------\n result : FitResult instance\n Object which holds the fitted results. It provides the factor matrices\n in form of a KTensor, ``result.factors``.\n\n\n Notes\n -----\n Fitting CP decompositions with missing data can be exploited to perform\n cross-validation.\n\n References\n ----------\n Williams, A. H.\n \"Solving Least-Squares Regression with Missing Data.\"\n http://alexhwilliams.info/itsneuronalblog/2018/02/26/censored-lstsq/\n "
] |
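A sketch of the cross-validation use mentioned in the notes: hold out a random 20% of entries, fit to the rest, and score the held-out entries (names are illustrative; assumes randn_ktensor from this package):
import numpy as np
X = randn_ktensor((15, 15, 15), rank=2, random_state=0).full()
mask = np.random.rand(*X.shape) > 0.2  # True marks observed entries
result = mcp_als(X, rank=2, mask=mask)
test_err = np.linalg.norm((~mask) * (result.factors.full() - X))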
Please provide a description of the function:def ncp_bcd(X, rank, random_state=None, init='rand', **options):
    # Check inputs.
    optim_utils._check_cpd_inputs(X, rank)
    # Store tensor order for convenience.
    N = X.ndim
    # Initialize problem.
    U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
    result = FitResult(U, 'NCP_BCD', **options)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Block coordinate descent
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Um = U.copy()  # Extrapolations of components
    extraw = 1  # Used for extrapolation weight update
    weights_U = np.ones(N)  # Extrapolation weights
    L = np.ones(N)  # Lipschitz constants
    obj_bcd = 0.5 * normX**2  # Initial objective value
    # Main optimization loop.
    while result.still_optimizing:
        obj_bcd_old = obj_bcd  # Old objective value
        U_old = U.copy()
        extraw_old = extraw
        for n in range(N):
            # Select all components, but U_n
            components = [U[j] for j in range(N) if j != n]
            # i) compute the N-1 gram matrices
            grams = sci.multiply.reduce([arr.T.dot(arr) for arr in components])
            # Update gradient Lipschitz constant
            L0 = L  # Lipschitz constants
            L[n] = linalg.norm(grams, 2)
            # ii) Compute Khatri-Rao product
            kr = khatri_rao(components)
            p = unfold(X, n).dot(kr)
            # Compute gradient.
            grad = Um[n].dot(grams) - p
            # Enforce nonnegativity (project onto nonnegative orthant).
            U[n] = sci.maximum(0.0, Um[n] - grad / L[n])
        # Compute objective function and update optimization result.
        obj = linalg.norm(X - U.full()) / normX
        result.update(obj)
        # Correction and extrapolation.
        grams *= U[N - 1].T.dot(U[N - 1])
        obj_bcd = 0.5 * (sci.sum(grams) - 2 * sci.sum(U[N - 1] * p) + normX**2)
        extraw = (1 + sci.sqrt(1 + 4 * extraw_old**2)) / 2.0
        if obj_bcd >= obj_bcd_old:
            # restore previous A to make the objective nonincreasing
            Um = sci.copy(U_old)
        else:
            # apply extrapolation
            w = (extraw_old - 1.0) / extraw  # Extrapolation weight
            for n in range(N):
                weights_U[n] = min(w, 1.0 * sci.sqrt(L0[n] / L[n]))  # choose smaller weights for convergence
                Um[n] = U[n] + weights_U[n] * (U[n] - U_old[n])  # extrapolation
    # Finalize and return the optimization result.
    return result.finalize() | [
"\n Fits nonnegative CP Decomposition using the Block Coordinate Descent (BCD)\n Method.\n\n Parameters\n ----------\n X : (I_1, ..., I_N) array_like\n A real array with nonnegative entries and ``X.ndim >= 3``.\n\n rank : integer\n The `rank` sets the number of components to be computed.\n\n random_state : integer, RandomState instance or None, optional (default ``None``)\n If integer, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used by np.random.\n\n init : str, or KTensor, optional (default ``'rand'``).\n Specifies initial guess for KTensor factor matrices.\n If ``'randn'``, Gaussian random numbers are used to initialize.\n If ``'rand'``, uniform random numbers are used to initialize.\n If KTensor instance, a copy is made to initialize the optimization.\n\n options : dict, specifying fitting options.\n\n tol : float, optional (default ``tol=1E-5``)\n Stopping tolerance for reconstruction error.\n\n max_iter : integer, optional (default ``max_iter = 500``)\n Maximum number of iterations to perform before exiting.\n\n min_iter : integer, optional (default ``min_iter = 1``)\n Minimum number of iterations to perform before exiting.\n\n max_time : integer, optional (default ``max_time = np.inf``)\n Maximum computational time before exiting.\n\n verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)\n Display progress.\n\n\n Returns\n -------\n result : FitResult instance\n Object which holds the fitted results. It provides the factor matrices\n in form of a KTensor, ``result.factors``.\n\n\n Notes\n -----\n This implementation uses the Block Coordinate Descent Method.\n\n\n References\n ----------\n Xu, Yangyang, and Wotao Yin. \"A block coordinate descent method for\n regularized multiconvex optimization with applications to\n nonnegative tensor factorization and completion.\"\n SIAM Journal on imaging sciences 6.3 (2013): 1758-1789.\n "
] |
Please provide a description of the function:def ncp_hals(X, rank, random_state=None, init='rand', **options):
# Check inputs.
optim_utils._check_cpd_inputs(X, rank)
# Initialize problem.
U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
result = FitResult(U, 'NCP_HALS', **options)
# Store problem dimensions.
normX = linalg.norm(X)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Iterate the HALS algorithm until convergence or maxiter is reached
# i) compute the N gram matrices and multiply
# ii) Compute Khatri-Rao product
# iii) Update component U_1, U_2, ... U_N
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
while result.still_optimizing:
violation = 0.0
for n in range(X.ndim):
# Select all components, but U_n
components = [U[j] for j in range(X.ndim) if j != n]
# i) compute the N-1 gram matrices
grams = sci.multiply.reduce([arr.T.dot(arr) for arr in components])
# ii) Compute Khatri-Rao product
kr = khatri_rao(components)
p = unfold(X, n).dot(kr)
# iii) Update component U_n
violation += _hals_update(U[n], grams, p)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update the optimization result, checks for convergence.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Compute objective function
# grams *= U[X.ndim - 1].T.dot(U[X.ndim - 1])
# obj = np.sqrt( (sci.sum(grams) - 2 * sci.sum(U[X.ndim - 1] * p) + normX**2)) / normX
result.update(linalg.norm(X - U.full()) / normX)
# end optimization loop, return result.
return result.finalize() | [
"\n Fits nonnegtaive CP Decomposition using the Hierarcial Alternating Least\n Squares (HALS) Method.\n\n Parameters\n ----------\n X : (I_1, ..., I_N) array_like\n A real array with nonnegative entries and ``X.ndim >= 3``.\n\n rank : integer\n The `rank` sets the number of components to be computed.\n\n random_state : integer, RandomState instance or None, optional (default ``None``)\n If integer, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used by np.random.\n\n init : str, or KTensor, optional (default ``'rand'``).\n Specifies initial guess for KTensor factor matrices.\n If ``'randn'``, Gaussian random numbers are used to initialize.\n If ``'rand'``, uniform random numbers are used to initialize.\n If KTensor instance, a copy is made to initialize the optimization.\n\n options : dict, specifying fitting options.\n\n tol : float, optional (default ``tol=1E-5``)\n Stopping tolerance for reconstruction error.\n\n max_iter : integer, optional (default ``max_iter = 500``)\n Maximum number of iterations to perform before exiting.\n\n min_iter : integer, optional (default ``min_iter = 1``)\n Minimum number of iterations to perform before exiting.\n\n max_time : integer, optional (default ``max_time = np.inf``)\n Maximum computational time before exiting.\n\n verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)\n Display progress.\n\n\n Returns\n -------\n result : FitResult instance\n Object which holds the fitted results. It provides the factor matrices\n in form of a KTensor, ``result.factors``.\n\n\n Notes\n -----\n This implemenation is using the Hierarcial Alternating Least Squares Method.\n\n\n References\n ----------\n Cichocki, Andrzej, and P. H. A. N. Anh-Huy. \"Fast local algorithms for\n large scale nonnegative matrix and tensor factorizations.\"\n IEICE transactions on fundamentals of electronics, communications and\n computer sciences 92.3: 708-721, 2009.\n\n Examples\n --------\n\n\n "
] |
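As above, the Examples section is empty; a hedged sketch of typical usage, assuming the documented tensortools.ncp_hals entry point (the option values below are illustrative):

import numpy as np
import tensortools as tt

# Any tensor with nonnegative entries and at least three modes works.
X = np.random.rand(15, 15, 15)

# Fit with HALS, tightening the stopping tolerance via **options.
result = tt.ncp_hals(X, rank=3, init='rand', tol=1e-6, max_iter=200, verbose=False)
U = result.factors     # KTensor of nonnegative factor matrices
print(U.full().shape)  # (15, 15, 15) dense reconstruction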
Please provide a description of the function:def cp_als(X, rank, random_state=None, init='randn', **options):
# Check inputs.
optim_utils._check_cpd_inputs(X, rank)
# Initialize problem.
U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
result = FitResult(U, 'CP_ALS', **options)
# Main optimization loop.
while result.still_optimizing:
# Iterate over each tensor mode.
for n in range(X.ndim):
# i) Normalize factors to prevent singularities.
U.rebalance()
# ii) Compute the N-1 gram matrices.
components = [U[j] for j in range(X.ndim) if j != n]
grams = sci.multiply.reduce([sci.dot(u.T, u) for u in components])
# iii) Compute Khatri-Rao product.
kr = khatri_rao(components)
# iv) Form normal equations and solve via Cholesky
c = linalg.cho_factor(grams, overwrite_a=False)
p = unfold(X, n).dot(kr)
U[n] = linalg.cho_solve(c, p.T, overwrite_b=False).T
# U[n] = linalg.solve(grams, unfold(X, n).dot(kr).T).T
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update the optimization result, checks for convergence.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Compute objective function
# grams *= U[-1].T.dot(U[-1])
# obj = np.sqrt(np.sum(grams) - 2*sci.sum(p*U[-1]) + normX**2) / normX
obj = linalg.norm(U.full() - X) / normX
# Update result
result.update(obj)
# Finalize and return the optimization result.
return result.finalize() | [
"Fits CP Decomposition using Alternating Least Squares (ALS).\n\n Parameters\n ----------\n X : (I_1, ..., I_N) array_like\n A tensor with ``X.ndim >= 3``.\n\n rank : integer\n The `rank` sets the number of components to be computed.\n\n random_state : integer, ``RandomState``, or ``None``, optional (default ``None``)\n If integer, sets the seed of the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, use the RandomState instance used by ``numpy.random``.\n\n init : str, or KTensor, optional (default ``'randn'``).\n Specifies initial guess for KTensor factor matrices.\n If ``'randn'``, Gaussian random numbers are used to initialize.\n If ``'rand'``, uniform random numbers are used to initialize.\n If KTensor instance, a copy is made to initialize the optimization.\n\n options : dict, specifying fitting options.\n\n tol : float, optional (default ``tol=1E-5``)\n Stopping tolerance for reconstruction error.\n\n max_iter : integer, optional (default ``max_iter = 500``)\n Maximum number of iterations to perform before exiting.\n\n min_iter : integer, optional (default ``min_iter = 1``)\n Minimum number of iterations to perform before exiting.\n\n max_time : integer, optional (default ``max_time = np.inf``)\n Maximum computational time before exiting.\n\n verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)\n Display progress.\n\n\n Returns\n -------\n result : FitResult instance\n Object which holds the fitted results. It provides the factor matrices\n in form of a KTensor, ``result.factors``.\n\n\n Notes\n -----\n Alternating Least Squares (ALS) is a very old and reliable method for\n fitting CP decompositions. This is likely a good first algorithm to try.\n\n\n References\n ----------\n Kolda, T. G. & Bader, B. W.\n \"Tensor Decompositions and Applications.\"\n SIAM Rev. 51 (2009): 455-500\n http://epubs.siam.org/doi/pdf/10.1137/07070111X\n\n Comon, Pierre & Xavier Luciani & Andre De Almeida.\n \"Tensor decompositions, alternating least squares and other tales.\"\n Journal of chemometrics 23 (2009): 393-405.\n http://onlinelibrary.wiley.com/doi/10.1002/cem.1236/abstract\n\n\n Examples\n --------\n\n ```\n import tensortools as tt\n I, J, K, R = 20, 20, 20, 4\n X = tt.randn_tensor(I, J, K, rank=R)\n tt.cp_als(X, rank=R)\n ```\n "
] |
Please provide a description of the function:def fit(self, X, ranks, replicates=1, verbose=True):
# Make ranks iterable if necessary.
if not isinstance(ranks, collections.abc.Iterable):  # collections.Iterable was removed in Python 3.10
ranks = (ranks,)
# Iterate over model ranks, optimize multiple replicates at each rank.
for r in ranks:
# Initialize storage
if r not in self.results:
self.results[r] = []
# Display fitting progress.
if verbose:
itr = trange(replicates,
desc='Fitting rank-{} models'.format(r),
leave=False)
else:
itr = range(replicates)
# Fit replicates.
for i in itr:
model_fit = self._fit_method(X, r, **self._fit_options)
self.results[r].append(model_fit)
# Print summary of results.
if verbose:
min_obj = min([res.obj for res in self.results[r]])
max_obj = max([res.obj for res in self.results[r]])
elapsed = sum([res.total_time for res in self.results[r]])
print('Rank-{} models: min obj, {:.2f}; '
'max obj, {:.2f}; time to fit, '
'{:.1f}s'.format(r, min_obj, max_obj, elapsed))
# Sort results from lowest to largest loss.
for r in ranks:
idx = np.argsort([result.obj for result in self.results[r]])
self.results[r] = [self.results[r][i] for i in idx]
# Align best model within each rank to best model of next larger rank.
# Here r0 is the rank of the lower-dimensional model and r1 is the rank
# of the high-dimensional model.
for i in reversed(range(1, len(ranks))):
r0, r1 = ranks[i-1], ranks[i]
U = self.results[r0][0].factors
V = self.results[r1][0].factors
kruskal_align(U, V, permute_U=True)
# For each rank, align everything to the best model
for r in ranks:
# store best factors
U = self.results[r][0].factors # best model factors
self.results[r][0].similarity = 1.0 # similarity to itself
# align lesser fit models to best models
for res in self.results[r][1:]:
res.similarity = kruskal_align(U, res.factors, permute_V=True) | [
"\n Fits CP tensor decompositions for different choices of rank.\n\n Parameters\n ----------\n X : array_like\n Real tensor\n ranks : int, or iterable\n iterable specifying number of components in each model\n replicates: int\n number of models to fit at each rank\n verbose : bool\n If True, prints summaries and optimization progress.\n "
] |
Please provide a description of the function:def objectives(self, rank):
self._check_rank(rank)
return [result.obj for result in self.results[rank]] | [
"Returns objective values of models with specified rank.\n "
] |
Please provide a description of the function:def similarities(self, rank):
self._check_rank(rank)
return [result.similarity for result in self.results[rank]] | [
"Returns similarity scores for models with specified rank.\n "
] |
Please provide a description of the function:def factors(self, rank):
self._check_rank(rank)
return [result.factors for result in self.results[rank]] | [
"Returns KTensor factors for models with specified rank.\n "
] |
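Taken together, fit, objectives, similarities and factors form the ensemble workflow. A sketch, assuming the class is exposed as tensortools.Ensemble with a fit_method keyword as in the published API:

import numpy as np
import tensortools as tt

X = np.random.rand(20, 20, 20)

# Fit 3 replicates at each of ranks 1..4 with ALS.
ensemble = tt.Ensemble(fit_method='cp_als')
ensemble.fit(X, ranks=range(1, 5), replicates=3, verbose=False)

print(ensemble.objectives(2))    # losses at rank 2, sorted best first
print(ensemble.similarities(2))  # similarity of each replicate to the best
best = ensemble.factors(2)[0]    # best rank-2 KTensor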
Please provide a description of the function:def commit(self):
# Iterate over a copy of the set, as records are removed from the
# original one during iteration
for record in set(self.dirty):
values = {}
for field in record._values_to_write:
if record.id in record._values_to_write[field]:
value = record._values_to_write[field].pop(record.id)
values[field] = value
# Store the value in the '_values' dictionary. This
# operation is delegated to each field descriptor as some
# values can not be stored "as is" (e.g. magic tuples of
# 2many fields need to be converted)
record.__class__.__dict__[field].store(record, value)
record.write(values)
self.dirty.remove(record) | [
"Commit dirty records to the server. This method is automatically\n called when the `auto_commit` option is set to `True` (default).\n It can be useful to set the former option to `False` to get better\n performance by reducing the number of RPC requests generated.\n\n With `auto_commit` set to `True` (default behaviour), each time a value\n is set on a record field a RPC request is sent to the server to update\n the record:\n\n .. doctest::\n\n >>> user = odoo.env.user\n >>> user.name = \"Joe\" # write({'name': \"Joe\"})\n >>> user.email = \"[email protected]\" # write({'email': \"[email protected]\"})\n\n With `auto_commit` set to `False`, changes on a record are sent all at\n once when calling the :func:`commit` method:\n\n .. doctest::\n\n >>> odoo.config['auto_commit'] = False\n >>> user = odoo.env.user\n >>> user.name = \"Joe\"\n >>> user.email = \"[email protected]\"\n >>> user in odoo.env.dirty\n True\n >>> odoo.env.commit() # write({'name': \"Joe\", 'email': \"[email protected]\"})\n >>> user in odoo.env.dirty\n False\n\n Only one RPC request is generated in the last case.\n "
] |
Please provide a description of the function:def ref(self, xml_id):
model, id_ = self._odoo.execute(
'ir.model.data', 'xmlid_to_res_model_res_id', xml_id, True)
return self[model].browse(id_) | [
"Return the record corresponding to the given `xml_id` (also called\n external ID).\n Raise an :class:`RPCError <odoorpc.error.RPCError>` if no record\n is found.\n\n .. doctest::\n\n >>> odoo.env.ref('base.lang_en')\n Recordset('res.lang', [1])\n\n :return: a :class:`odoorpc.models.Model` instance (recordset)\n :raise: :class:`odoorpc.error.RPCError`\n "
] |
Please provide a description of the function:def _create_model_class(self, model):
cls_name = model.replace('.', '_')
# Hack for Python 2 (no need to do this for Python 3)
if sys.version_info[0] < 3:
if isinstance(cls_name, unicode):
cls_name = cls_name.encode('utf-8')
# Retrieve server fields info and generate corresponding local fields
attrs = {
'_env': self,
'_odoo': self._odoo,
'_name': model,
'_columns': {},
}
fields_get = self._odoo.execute(model, 'fields_get')
for field_name, field_data in fields_get.items():
if field_name not in FIELDS_RESERVED:
Field = fields.generate_field(field_name, field_data)
attrs['_columns'][field_name] = Field
attrs[field_name] = Field
# If no 'name' field exists, generate a read-only one, meant to be
# filled in by the 'name_get' method
if 'name' not in attrs['_columns']:
field_data = {'type': 'text', 'string': 'Name', 'readonly': True}
Field = fields.generate_field('name', field_data)
attrs['_columns']['name'] = Field
attrs['name'] = Field
return type(cls_name, (Model,), attrs) | [
"Generate the model proxy class.\n\n :return: a :class:`odoorpc.models.Model` class\n "
] |
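This factory is not normally called directly; it runs behind the environment's item access. A sketch with hypothetical connection details:

import odoorpc

odoo = odoorpc.ODOO('localhost', port=8069)
odoo.login('db_name', 'admin', 'admin')

# env[...] builds (and caches) the proxy class via _create_model_class.
Partner = odoo.env['res.partner']
print(Partner._name)               # 'res.partner'
print('name' in Partner._columns)  # True: fields mirror fields_get()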
Please provide a description of the function:def get_all(rc_file='~/.odoorpcrc'):
conf = ConfigParser()
conf.read([os.path.expanduser(rc_file)])
sessions = {}
for name in conf.sections():
sessions[name] = {
'type': conf.get(name, 'type'),
'host': conf.get(name, 'host'),
'protocol': conf.get(name, 'protocol'),
'port': conf.getint(name, 'port'),
'timeout': conf.getfloat(name, 'timeout'),
'user': conf.get(name, 'user'),
'passwd': conf.get(name, 'passwd'),
'database': conf.get(name, 'database'),
}
return sessions | [
"Return all session configurations from the `rc_file` file.\n\n >>> import odoorpc\n >>> from pprint import pprint as pp\n >>> pp(odoorpc.session.get_all()) # doctest: +SKIP\n {'foo': {'database': 'db_name',\n 'host': 'localhost',\n 'passwd': 'password',\n 'port': 8069,\n 'protocol': 'jsonrpc',\n 'timeout': 120,\n 'type': 'ODOO',\n 'user': 'admin'},\n ...}\n\n .. doctest::\n :hide:\n\n >>> import odoorpc\n >>> session = '%s_session' % DB\n >>> odoo.save(session)\n >>> data = odoorpc.session.get_all()\n >>> data[session]['host'] == HOST\n True\n >>> data[session]['protocol'] == PROTOCOL\n True\n >>> data[session]['port'] == int(PORT)\n True\n >>> data[session]['database'] == DB\n True\n >>> data[session]['user'] == USER\n True\n >>> data[session]['passwd'] == PWD\n True\n >>> data[session]['type'] == 'ODOO'\n True\n "
] |
Please provide a description of the function:def get(name, rc_file='~/.odoorpcrc'):
conf = ConfigParser()
conf.read([os.path.expanduser(rc_file)])
if not conf.has_section(name):
raise ValueError(
"'%s' session does not exist in %s" % (name, rc_file))
return {
'type': conf.get(name, 'type'),
'host': conf.get(name, 'host'),
'protocol': conf.get(name, 'protocol'),
'port': conf.getint(name, 'port'),
'timeout': conf.getfloat(name, 'timeout'),
'user': conf.get(name, 'user'),
'passwd': conf.get(name, 'passwd'),
'database': conf.get(name, 'database'),
} | [
"Return the session configuration identified by `name`\n from the `rc_file` file.\n\n >>> import odoorpc\n >>> from pprint import pprint as pp\n >>> pp(odoorpc.session.get('foo')) # doctest: +SKIP\n {'database': 'db_name',\n 'host': 'localhost',\n 'passwd': 'password',\n 'port': 8069,\n 'protocol': 'jsonrpc',\n 'timeout': 120,\n 'type': 'ODOO',\n 'user': 'admin'}\n\n .. doctest::\n :hide:\n\n >>> import odoorpc\n >>> session = '%s_session' % DB\n >>> odoo.save(session)\n >>> data = odoorpc.session.get(session)\n >>> data['host'] == HOST\n True\n >>> data['protocol'] == PROTOCOL\n True\n >>> data['port'] == int(PORT)\n True\n >>> data['database'] == DB\n True\n >>> data['user'] == USER\n True\n >>> data['passwd'] == PWD\n True\n >>> data['type'] == 'ODOO'\n True\n\n :raise: `ValueError` (wrong session name)\n "
] |
Please provide a description of the function:def save(name, data, rc_file='~/.odoorpcrc'):
conf = ConfigParser()
conf.read([os.path.expanduser(rc_file)])
if not conf.has_section(name):
conf.add_section(name)
for key in data:
value = data[key]
conf.set(name, key, str(value))
with open(os.path.expanduser(rc_file), 'w') as file_:
os.chmod(os.path.expanduser(rc_file), stat.S_IREAD | stat.S_IWRITE)
conf.write(file_) | [
"Save the `data` session configuration under the name `name`\n in the `rc_file` file.\n\n >>> import odoorpc\n >>> odoorpc.session.save(\n ... 'foo',\n ... {'type': 'ODOO', 'host': 'localhost', 'protocol': 'jsonrpc',\n ... 'port': 8069, 'timeout': 120, 'database': 'db_name'\n ... 'user': 'admin', 'passwd': 'password'}) # doctest: +SKIP\n\n .. doctest::\n :hide:\n\n >>> import odoorpc\n >>> session = '%s_session' % DB\n >>> odoorpc.session.save(\n ... session,\n ... {'type': 'ODOO', 'host': HOST, 'protocol': PROTOCOL,\n ... 'port': PORT, 'timeout': 120, 'database': DB,\n ... 'user': USER, 'passwd': PWD})\n "
] |
Please provide a description of the function:def remove(name, rc_file='~/.odoorpcrc'):
conf = ConfigParser()
conf.read([os.path.expanduser(rc_file)])
if not conf.has_section(name):
raise ValueError(
"'%s' session does not exist in %s" % (name, rc_file))
conf.remove_section(name)
# Text mode is required: ConfigParser.write() expects a str stream
with open(os.path.expanduser(rc_file), 'w') as file_:
conf.write(file_) | [
"Remove the session configuration identified by `name`\n from the `rc_file` file.\n\n >>> import odoorpc\n >>> odoorpc.session.remove('foo') # doctest: +SKIP\n\n .. doctest::\n :hide:\n\n >>> import odoorpc\n >>> session = '%s_session' % DB\n >>> odoorpc.session.remove(session)\n\n :raise: `ValueError` (wrong session name)\n "
] |
Please provide a description of the function:def get_encodings(hint_encoding='utf-8'):
fallbacks = {
'latin1': 'latin9',
'iso-8859-1': 'iso8859-15',
'cp1252': '1252',
}
if hint_encoding:
yield hint_encoding
if hint_encoding.lower() in fallbacks:
yield fallbacks[hint_encoding.lower()]
# some defaults (also taking care of pure ASCII)
for charset in ['utf8', 'latin1', 'ascii']:
if not hint_encoding or (charset.lower() != hint_encoding.lower()):
yield charset
from locale import getpreferredencoding
prefenc = getpreferredencoding()
if prefenc and prefenc.lower() != 'utf-8':
yield prefenc
prefenc = fallbacks.get(prefenc.lower())
if prefenc:
yield prefenc | [
"Used to try different encoding.\n Function copied from Odoo 11.0 (odoo.loglevels.get_encodings).\n This piece of code is licensed under the LGPL-v3 and so it is compatible\n with the LGPL-v3 license of OdooRPC::\n\n - https://github.com/odoo/odoo/blob/11.0/LICENSE\n - https://github.com/odoo/odoo/blob/11.0/COPYRIGHT\n "
] |
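A sketch of the decode-with-fallback pattern this generator supports, assuming it is importable (e.g. from odoorpc.tools):

raw = b'caf\xe9'  # latin-1 encoded bytes, invalid as UTF-8

text = None
for charset in get_encodings(hint_encoding='utf-8'):
    try:
        text = raw.decode(charset)
        break  # first charset that decodes wins
    except (UnicodeDecodeError, LookupError):
        continue

print(text)  # 'café', recovered via the latin1 fallback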
Please provide a description of the function:def get_json_log_data(data):
log_data = data
for param in LOG_HIDDEN_JSON_PARAMS:
if param in data['params']:
if log_data is data:
log_data = copy.deepcopy(data)
log_data['params'][param] = "**********"
return log_data | [
"Returns a new `data` dictionary with hidden params\n for log purpose.\n "
] |
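A sketch of the masking behaviour, assuming 'password' is one of the entries in LOG_HIDDEN_JSON_PARAMS:

data = {'params': {'db': 'prod', 'login': 'admin', 'password': 'secret'}}
safe = get_json_log_data(data)

print(safe['params']['password'])  # '**********' in the deep copy
print(data['params']['password'])  # 'secret': the original dict is untouched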
Please provide a description of the function:def json(self, url, params):
data = self._connector.proxy_json(url, params)
if data.get('error'):
raise error.RPCError(
data['error']['data']['message'],
data['error'])
return data | [
"Low level method to execute JSON queries.\n It basically performs a request and raises an\n :class:`odoorpc.error.RPCError` exception if the response contains\n an error.\n\n You have to know the names of each parameter required by the function\n called, and set them in the `params` dictionary.\n\n Here an authentication request:\n\n .. doctest::\n :options: +SKIP\n\n >>> data = odoo.json(\n ... '/web/session/authenticate',\n ... {'db': 'db_name', 'login': 'admin', 'password': 'admin'})\n >>> from pprint import pprint\n >>> pprint(data)\n {'id': 645674382,\n 'jsonrpc': '2.0',\n 'result': {'db': 'db_name',\n 'session_id': 'fa740abcb91784b8f4750c5c5b14da3fcc782d11',\n 'uid': 1,\n 'user_context': {'lang': 'en_US',\n 'tz': 'Europe/Brussels',\n 'uid': 1},\n 'username': 'admin'}}\n\n .. doctest::\n :hide:\n\n >>> data = odoo.json(\n ... '/web/session/authenticate',\n ... {'db': DB, 'login': USER, 'password': PWD})\n >>> data['result']['db'] == DB\n True\n >>> data['result']['uid'] in [1, 2]\n True\n >>> data['result']['username'] == USER\n True\n\n And a call to the ``read`` method of the ``res.users`` model:\n\n .. doctest::\n :options: +SKIP\n\n >>> data = odoo.json(\n ... '/web/dataset/call',\n ... {'model': 'res.users', 'method': 'read',\n ... 'args': [[2], ['name']]})\n >>> from pprint import pprint\n >>> pprint(data)\n {'id': ...,\n 'jsonrpc': '2.0',\n 'result': [{'id': 2, 'name': 'Mitchell Admin'}]}\n\n *Python 2:*\n\n :return: a dictionary (JSON response)\n :raise: :class:`odoorpc.error.RPCError`\n :raise: `urllib2.HTTPError` (if `params` is not a dictionary)\n :raise: `urllib2.URLError` (connection error)\n\n *Python 3:*\n\n :return: a dictionary (JSON response)\n :raise: :class:`odoorpc.error.RPCError`\n :raise: `urllib.error.HTTPError` (if `params` is not a dictionary)\n :raise: `urllib.error.URLError` (connection error)\n "
] |
Please provide a description of the function:def http(self, url, data=None, headers=None):
return self._connector.proxy_http(url, data, headers) | [
"Low level method to execute raw HTTP queries.\n\n .. note::\n\n For low level JSON-RPC queries, see the more convenient\n :func:`odoorpc.ODOO.json` method instead.\n\n You have to know the names of each POST parameter required by the\n URL, and set them in the `data` string/buffer.\n The `data` argument must be built by yourself, following the expected\n URL parameters (with :func:`urllib.urlencode` function for simple\n parameters, or multipart/form-data structure to handle file upload).\n\n E.g., the HTTP raw query to get the company logo on `Odoo 12.0`:\n\n .. doctest::\n\n >>> response = odoo.http('web/binary/company_logo')\n >>> binary_data = response.read()\n\n *Python 2:*\n\n :return: `urllib.addinfourl`\n :raise: `urllib2.HTTPError`\n :raise: `urllib2.URLError` (connection error)\n\n *Python 3:*\n\n :return: `http.client.HTTPResponse`\n :raise: `urllib.error.HTTPError`\n :raise: `urllib.error.URLError` (connection error)\n "
] |
Please provide a description of the function:def _check_logged_user(self):
if not self._env or not self._password or not self._login:
raise error.InternalError("Login required") | [
"Check if a user is logged. Otherwise, an error is raised."
] |
Please provide a description of the function:def login(self, db, login='admin', password='admin'):
# Get the user's ID and generate the corresponding user record
data = self.json(
'/web/session/authenticate',
{'db': db, 'login': login, 'password': password})
uid = data['result']['uid']
if uid:
context = data['result']['user_context']
self._env = Environment(self, db, uid, context=context)
self._login = login
self._password = password
else:
raise error.RPCError("Wrong login ID or password") | [
"Log in as the given `user` with the password `passwd` on the\n database `db`.\n\n .. doctest::\n :options: +SKIP\n\n >>> odoo.login('db_name', 'admin', 'admin')\n >>> odoo.env.user.name\n 'Administrator'\n\n *Python 2:*\n\n :raise: :class:`odoorpc.error.RPCError`\n :raise: `urllib2.URLError` (connection error)\n\n *Python 3:*\n\n :raise: :class:`odoorpc.error.RPCError`\n :raise: `urllib.error.URLError` (connection error)\n "
] |
Please provide a description of the function:def logout(self):
if not self._env:
return False
self.json('/web/session/destroy', {})
self._env = None
self._login = None
self._password = None
return True | [
"Log out the user.\n\n >>> odoo.logout()\n True\n\n *Python 2:*\n\n :return: `True` if the operation succeed, `False` if no user was logged\n :raise: :class:`odoorpc.error.RPCError`\n :raise: `urllib2.URLError` (connection error)\n\n *Python 3:*\n\n :return: `True` if the operation succeed, `False` if no user was logged\n :raise: :class:`odoorpc.error.RPCError`\n :raise: `urllib.error.URLError` (connection error)\n "
] |
Please provide a description of the function:def execute(self, model, method, *args):
self._check_logged_user()
# Execute the query
args_to_send = [self.env.db, self.env.uid, self._password,
model, method]
args_to_send.extend(args)
data = self.json(
'/jsonrpc',
{'service': 'object',
'method': 'execute',
'args': args_to_send})
return data.get('result') | [
"Execute the `method` of `model`.\n `*args` parameters varies according to the `method` used.\n\n .. doctest::\n :options: +SKIP\n\n >>> odoo.execute('res.partner', 'read', [1], ['name'])\n [{'id': 1, 'name': 'YourCompany'}]\n\n .. doctest::\n :hide:\n\n >>> data = odoo.execute('res.partner', 'read', [1], ['name'])\n >>> data[0]['id'] == 1\n True\n >>> data[0]['name'] == 'YourCompany'\n True\n\n *Python 2:*\n\n :return: the result returned by the `method` called\n :raise: :class:`odoorpc.error.RPCError`\n :raise: :class:`odoorpc.error.InternalError` (if not logged)\n :raise: `urllib2.URLError` (connection error)\n\n *Python 3:*\n\n :return: the result returned by the `method` called\n :raise: :class:`odoorpc.error.RPCError`\n :raise: :class:`odoorpc.error.InternalError` (if not logged)\n :raise: `urllib.error.URLError` (connection error)\n "
] |
Please provide a description of the function:def exec_workflow(self, model, record_id, signal):
if tools.v(self.version)[0] >= 11:
raise DeprecationWarning(
u"Workflows have been removed in Odoo >= 11.0")
self._check_logged_user()
# Execute the workflow query
args_to_send = [self.env.db, self.env.uid, self._password,
model, signal, record_id]
data = self.json(
'/jsonrpc',
{'service': 'object',
'method': 'exec_workflow',
'args': args_to_send})
return data.get('result') | [
"Execute the workflow `signal` on\n the instance having the ID `record_id` of `model`.\n\n *Python 2:*\n\n :raise: :class:`odoorpc.error.RPCError`\n :raise: :class:`odoorpc.error.InternalError` (if not logged)\n :raise: `urllib2.URLError` (connection error)\n\n *Python 3:*\n\n :raise: :class:`odoorpc.error.RPCError`\n :raise: :class:`odoorpc.error.InternalError` (if not logged)\n :raise: `urllib.error.URLError` (connection error)\n "
] |
Please provide a description of the function:def save(self, name, rc_file='~/.odoorpcrc'):
self._check_logged_user()
data = {
'type': self.__class__.__name__,
'host': self.host,
'protocol': self.protocol,
'port': self.port,
'timeout': self.config['timeout'],
'user': self._login,
'passwd': self._password,
'database': self.env.db,
}
session.save(name, data, rc_file) | [
"Save the current :class:`ODOO <odoorpc.ODOO>` instance (a `session`)\n inside `rc_file` (``~/.odoorpcrc`` by default). This session will be\n identified by `name`::\n\n >>> import odoorpc\n >>> odoo = odoorpc.ODOO('localhost', port=8069)\n >>> odoo.login('db_name', 'admin', 'admin')\n >>> odoo.save('foo')\n\n Use the :func:`list <odoorpc.ODOO.list>` class method to list all\n stored sessions, and the :func:`load <odoorpc.ODOO.load>` class method\n to retrieve an already-connected :class:`ODOO <odoorpc.ODOO>` instance.\n\n *Python 2:*\n\n :raise: :class:`odoorpc.error.InternalError` (if not logged)\n :raise: `IOError`\n\n *Python 3:*\n\n :raise: :class:`odoorpc.error.InternalError` (if not logged)\n :raise: `PermissionError`\n :raise: `FileNotFoundError`\n "
] |
Please provide a description of the function:def load(cls, name, rc_file='~/.odoorpcrc'):
data = session.get(name, rc_file)
if data.get('type') != cls.__name__:
raise error.InternalError(
"'{0}' session is not of type '{1}'".format(
name, cls.__name__))
odoo = cls(
host=data['host'],
protocol=data['protocol'],
port=data['port'],
timeout=data['timeout'],
)
odoo.login(
db=data['database'], login=data['user'], password=data['passwd'])
return odoo | [
"Return a connected :class:`ODOO` session identified by `name`:\n\n .. doctest::\n :options: +SKIP\n\n >>> import odoorpc\n >>> odoo = odoorpc.ODOO.load('foo')\n\n Such sessions are stored with the\n :func:`save <odoorpc.ODOO.save>` method.\n\n *Python 2:*\n\n :raise: :class:`odoorpc.error.RPCError`\n :raise: `urllib2.URLError` (connection error)\n\n *Python 3:*\n\n :raise: :class:`odoorpc.error.RPCError`\n :raise: `urllib.error.URLError` (connection error)\n "
] |
Please provide a description of the function:def list(cls, rc_file='~/.odoorpcrc'):
sessions = session.get_all(rc_file)
return [name for name in sessions
if sessions[name].get('type') == cls.__name__] | [
"Return a list of all stored sessions available in the\n `rc_file` file:\n\n .. doctest::\n :options: +SKIP\n\n >>> import odoorpc\n >>> odoorpc.ODOO.list()\n ['foo', 'bar']\n\n Use the :func:`save <odoorpc.ODOO.save>` and\n :func:`load <odoorpc.ODOO.load>` methods to manage such sessions.\n\n *Python 2:*\n\n :raise: `IOError`\n\n *Python 3:*\n\n :raise: `PermissionError`\n :raise: `FileNotFoundError`\n "
] |
Please provide a description of the function:def remove(cls, name, rc_file='~/.odoorpcrc'):
data = session.get(name, rc_file)
if data.get('type') != cls.__name__:
raise error.InternalError(
"'{0}' session is not of type '{1}'".format(
name, cls.__name__))
return session.remove(name, rc_file) | [
"Remove the session identified by `name` from the `rc_file` file:\n\n .. doctest::\n :options: +SKIP\n\n >>> import odoorpc\n >>> odoorpc.ODOO.remove('foo')\n True\n\n *Python 2:*\n\n :raise: `ValueError` (if the session does not exist)\n :raise: `IOError`\n\n *Python 3:*\n\n :raise: `ValueError` (if the session does not exist)\n :raise: `PermissionError`\n :raise: `FileNotFoundError`\n "
] |
Please provide a description of the function:def dump(self, password, db, format_='zip'):
args = [password, db]
if v(self._odoo.version)[0] >= 9:
args.append(format_)
data = self._odoo.json(
'/jsonrpc',
{'service': 'db',
'method': 'dump',
'args': args})
# Encode to bytes forced to be compatible with Python 3.2
# (its 'base64.standard_b64decode()' function only accepts bytes)
result = encode2bytes(data['result'])
content = base64.standard_b64decode(result)
return io.BytesIO(content) | [
"Backup the `db` database. Returns the dump as a binary ZIP file\n containing the SQL dump file alongside the filestore directory (if any).\n\n >>> dump = odoo.db.dump('super_admin_passwd', 'prod') # doctest: +SKIP\n\n .. doctest::\n :hide:\n\n >>> dump = odoo.db.dump(SUPER_PWD, DB)\n\n If you get a timeout error, increase this one before performing the\n request:\n\n >>> timeout_backup = odoo.config['timeout']\n >>> odoo.config['timeout'] = 600 # Timeout set to 10 minutes\n >>> dump = odoo.db.dump('super_admin_passwd', 'prod') # doctest: +SKIP\n >>> odoo.config['timeout'] = timeout_backup\n\n Write it on the file system:\n\n .. doctest::\n :options: +SKIP\n\n >>> with open('dump.zip', 'wb') as dump_zip:\n ... dump_zip.write(dump.read())\n ...\n\n .. doctest::\n :hide:\n\n >>> with open('dump.zip', 'wb') as dump_zip:\n ... fileno = dump_zip.write(dump.read()) # Python 3\n ...\n\n You can manipulate the file with the `zipfile` module for instance:\n\n .. doctest::\n :options: +SKIP\n\n >>> import zipfile\n >>> zipfile.ZipFile('dump.zip').namelist()\n ['dump.sql',\n 'filestore/ef/ef2c882a36dbe90fc1e7e28d816ad1ac1464cfbb',\n 'filestore/dc/dcf00aacce882bbfd117c0277e514f829b4c5bf0',\n ...]\n\n .. doctest::\n :hide:\n\n >>> import zipfile\n >>> zipfile.ZipFile('dump.zip').namelist() # doctest: +NORMALIZE_WHITESPACE\n ['dump.sql'...'filestore/...'...]\n\n The super administrator password is required to perform this method.\n\n *Python 2:*\n\n :return: `io.BytesIO`\n :raise: :class:`odoorpc.error.RPCError` (access denied / wrong database)\n :raise: `urllib2.URLError` (connection error)\n\n *Python 3:*\n\n :return: `io.BytesIO`\n :raise: :class:`odoorpc.error.RPCError` (access denied / wrong database)\n :raise: `urllib.error.URLError` (connection error)\n "
] |
Please provide a description of the function:def create(self, password, db, demo=False, lang='en_US', admin_password='admin'):
self._odoo.json(
'/jsonrpc',
{'service': 'db',
'method': 'create_database',
'args': [password, db, demo, lang, admin_password]}) | [
"Request the server to create a new database named `db`\n which will have `admin_password` as administrator password and\n localized with the `lang` parameter.\n You have to set the flag `demo` to `True` in order to insert\n demonstration data.\n\n >>> odoo.db.create('super_admin_passwd', 'prod', False, 'fr_FR', 'my_admin_passwd') # doctest: +SKIP\n\n If you get a timeout error, increase this one before performing the\n request:\n\n >>> timeout_backup = odoo.config['timeout']\n >>> odoo.config['timeout'] = 600 # Timeout set to 10 minutes\n >>> odoo.db.create('super_admin_passwd', 'prod', False, 'fr_FR', 'my_admin_passwd') # doctest: +SKIP\n >>> odoo.config['timeout'] = timeout_backup\n\n The super administrator password is required to perform this method.\n\n *Python 2:*\n\n :raise: :class:`odoorpc.error.RPCError` (access denied)\n :raise: `urllib2.URLError` (connection error)\n\n *Python 3:*\n\n :raise: :class:`odoorpc.error.RPCError` (access denied)\n :raise: `urllib.error.URLError` (connection error)\n "
] |
Please provide a description of the function:def drop(self, password, db):
if self._odoo._env and self._odoo._env.db == db:
# Remove the existing session to avoid HTTP session error
self._odoo.logout()
data = self._odoo.json(
'/jsonrpc',
{'service': 'db',
'method': 'drop',
'args': [password, db]})
return data['result'] | [
"Drop the `db` database. Returns `True` if the database was removed,\n `False` otherwise (database did not exist):\n\n >>> odoo.db.drop('super_admin_passwd', 'test') # doctest: +SKIP\n True\n\n The super administrator password is required to perform this method.\n\n *Python 2:*\n\n :return: `True` or `False`\n :raise: :class:`odoorpc.error.RPCError` (access denied)\n :raise: `urllib2.URLError` (connection error)\n\n *Python 3:*\n\n :return: `True` or `False`\n :raise: :class:`odoorpc.error.RPCError` (access denied)\n :raise: `urllib.error.URLError` (connection error)\n "
] |
Please provide a description of the function:def duplicate(self, password, db, new_db):
self._odoo.json(
'/jsonrpc',
{'service': 'db',
'method': 'duplicate_database',
'args': [password, db, new_db]}) | [
"Duplicate `db' as `new_db`.\n\n >>> odoo.db.duplicate('super_admin_passwd', 'prod', 'test') # doctest: +SKIP\n\n The super administrator password is required to perform this method.\n\n *Python 2:*\n\n :raise: :class:`odoorpc.error.RPCError` (access denied / wrong database)\n :raise: `urllib2.URLError` (connection error)\n\n *Python 3:*\n\n :raise: :class:`odoorpc.error.RPCError` (access denied / wrong database)\n :raise: `urllib.error.URLError` (connection error)\n "
] |
Please provide a description of the function:def restore(self, password, db, dump, copy=False):
if dump.closed:
raise error.InternalError("Dump file closed")
b64_data = base64.standard_b64encode(dump.read()).decode()
self._odoo.json(
'/jsonrpc',
{'service': 'db',
'method': 'restore',
'args': [password, db, b64_data, copy]}) | [
"Restore the `dump` database into the new `db` database.\n The `dump` file object can be obtained with the\n :func:`dump <DB.dump>` method.\n If `copy` is set to `True`, the restored database will have a new UUID.\n\n >>> odoo.db.restore('super_admin_passwd', 'test', dump_file) # doctest: +SKIP\n\n If you get a timeout error, increase this one before performing the\n request:\n\n >>> timeout_backup = odoo.config['timeout']\n >>> odoo.config['timeout'] = 7200 # Timeout set to 2 hours\n >>> odoo.db.restore('super_admin_passwd', 'test', dump_file) # doctest: +SKIP\n >>> odoo.config['timeout'] = timeout_backup\n\n The super administrator password is required to perform this method.\n\n *Python 2:*\n\n :raise: :class:`odoorpc.error.RPCError`\n (access denied / database already exists)\n :raise: :class:`odoorpc.error.InternalError` (dump file closed)\n :raise: `urllib2.URLError` (connection error)\n\n *Python 3:*\n\n :raise: :class:`odoorpc.error.RPCError`\n (access denied / database already exists)\n :raise: :class:`odoorpc.error.InternalError` (dump file closed)\n :raise: `urllib.error.URLError` (connection error)\n "
] |
Please provide a description of the function:def _get_proxies(self):
proxy_json = jsonrpclib.ProxyJSON(
self.host, self.port, self._timeout,
ssl=self.ssl, deserialize=self.deserialize, opener=self._opener)
proxy_http = jsonrpclib.ProxyHTTP(
self.host, self.port, self._timeout,
ssl=self.ssl, opener=self._opener)
# Detect the server version
if self.version is None:
result = proxy_json('/web/webclient/version_info')['result']
if 'server_version' in result:
self.version = result['server_version']
return proxy_json, proxy_http | [
"Returns the :class:`ProxyJSON <odoorpc.rpc.jsonrpclib.ProxyJSON>`\n and :class:`ProxyHTTP <odoorpc.rpc.jsonrpclib.ProxyHTTP>` instances\n corresponding to the server version used.\n "
] |
Please provide a description of the function:def timeout(self, timeout):
self._proxy_json._timeout = timeout
self._proxy_http._timeout = timeout | [
"Set the timeout."
] |
Please provide a description of the function:def is_int(value):
if isinstance(value, bool):
return False
try:
int(value)
return True
except (ValueError, TypeError):
return False | [
"Return `True` if ``value`` is an integer."
] |
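The isinstance(value, bool) guard matters because bool is a subclass of int; a few illustrative checks:

assert is_int(3)          # plain integer
assert is_int('42')       # numeric strings convert via int()
assert not is_int(True)   # booleans are rejected explicitly
assert not is_int('abc')  # ValueError is caught
assert not is_int(None)   # TypeError is caught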
Please provide a description of the function:def odoo_tuple_in(iterable):
if not iterable:
return False
def is_odoo_tuple(elt):
try:
return elt[:1][0] in [1, 2, 3, 4, 5] \
or elt[:2] in [(6, 0), [6, 0], (0, 0), [0, 0]]
except (TypeError, IndexError):
return False
return any(is_odoo_tuple(elt) for elt in iterable) | [
"Return `True` if `iterable` contains an expected tuple like\n ``(6, 0, IDS)`` (and so on).\n\n >>> odoo_tuple_in([0, 1, 2]) # Simple list\n False\n >>> odoo_tuple_in([(6, 0, [42])]) # List of tuples\n True\n >>> odoo_tuple_in([[1, 42]]) # List of lists\n True\n ",
"Return `True` if `elt` is a Odoo special tuple."
] |
Please provide a description of the function:def tuples2ids(tuples, ids):
for value in tuples:
if value[0] == 6 and value[2]:
ids = value[2]
elif value[0] == 5:
ids[:] = []
elif value[0] == 4 and value[1] and value[1] not in ids:
ids.append(value[1])
elif value[0] == 3 and value[1] and value[1] in ids:
ids.remove(value[1])
return ids | [
"Update `ids` according to `tuples`, e.g. (3, 0, X), (4, 0, X)..."
] |
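A short walkthrough of the magic-tuple semantics handled above:

ids = [1, 2, 3]
ids = tuples2ids([(3, 2), (4, 7)], ids)    # unlink ID 2, link ID 7
print(ids)                                 # [1, 3, 7]

ids = tuples2ids([(6, 0, [10, 11])], ids)  # (6, 0, IDS) replaces everything
print(ids)                                 # [10, 11]

ids = tuples2ids([(5,)], ids)              # (5,) clears the list in place
print(ids)                                 # []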
Please provide a description of the function:def records2ids(iterable):
def record2id(elt):
if isinstance(elt, Model):
return elt.id
return elt
return [record2id(elt) for elt in iterable] | [
"Replace records contained in `iterable` with their corresponding IDs:\n\n >>> groups = list(odoo.env.user.groups_id)\n >>> records2ids(groups)\n [1, 2, 3, 14, 17, 18, 19, 7, 8, 9, 5, 20, 21, 22, 23]\n ",
"If `elt` is a record, return its ID."
] |
Please provide a description of the function:def generate_field(name, data):
assert 'type' in data
field = TYPES_TO_FIELDS.get(data['type'], Unknown)(name, data)
return field | [
"Generate a well-typed field according to the data dictionary supplied\n (obtained via the `fields_get' method of any models).\n "
] |
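A sketch of building a descriptor from fields_get()-style metadata; 'char' is assumed to be mapped in TYPES_TO_FIELDS, as it is for standard Odoo field types:

field_data = {'type': 'char', 'string': 'Name', 'size': 64,
              'required': True, 'readonly': False}
Name = generate_field('name', field_data)
print(type(Name).__name__)  # e.g. 'Char', the descriptor class for this type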
Please provide a description of the function:def check_value(self, value):
#if self.readonly:
# raise error.Error(
# "'{field_name}' field is readonly".format(
# field_name=self.name))
if value and self.size:
if not is_string(value):
raise ValueError("Value supplied has to be a string")
if len(value) > self.size:
raise ValueError(
"Lenght of the '{0}' is limited to {1}".format(
self.name, self.size))
if not value and self.required:
raise ValueError("'{0}' field is required".format(self.name))
return value | [
"Check the validity of a value for the field."
] |
Please provide a description of the function:def store(self, record, value):
record._values[self.name][record.id] = value | [
"Store the value in the record."
] |
Please provide a description of the function:def store(self, record, value):
if record._values[self.name].get(record.id):
tuples2ids(value, record._values[self.name][record.id])
else:
record._values[self.name][record.id] = tuples2ids(value, []) | [
"Store the value in the record."
] |
Please provide a description of the function:def _check_relation(self, relation):
selection = [val[0] for val in self.selection]
if relation not in selection:
raise ValueError(
("The value '{value}' supplied doesn't match with the possible"
" values '{selection}' for the '{field_name}' field").format(
value=relation,
selection=selection,
field_name=self.name,
))
return relation | [
"Raise a `ValueError` if `relation` is not allowed among\n the possible values.\n "
] |
Please provide a description of the function:def download(self, name, ids, datas=None, context=None):
if context is None:
context = self._odoo.env.context
def check_report(name):
report_model = 'ir.actions.report'
if v(self._odoo.version)[0] < 11:
report_model = 'ir.actions.report.xml'
IrReport = self._odoo.env[report_model]
report_ids = IrReport.search([('report_name', '=', name)])
report_id = report_ids and report_ids[0] or False
if not report_id:
raise ValueError("The report '%s' does not exist." % name)
return report_id
report_id = check_report(name)
# Odoo >= 11.0
if v(self._odoo.version)[0] >= 11:
IrReport = self._odoo.env['ir.actions.report']
report = IrReport.browse(report_id)
response = report.with_context(context).render(ids, data=datas)
content = response[0]
# On the server the result is a bytes string,
# but the RPC layer of Odoo returns it as a unicode string,
# so we encode it again as bytes
result = content.encode('latin1')
return io.BytesIO(result)
# Odoo < 11.0
else:
args_to_send = [self._odoo.env.db,
self._odoo.env.uid, self._odoo._password,
name, ids, datas, context]
data = self._odoo.json(
'/jsonrpc',
{'service': 'report',
'method': 'render_report',
'args': args_to_send})
if 'result' not in data or not data['result'].get('result'):
raise ValueError("Received invalid data.")
# Encode to bytes forced to be compatible with Python 3.2
# (its 'base64.standard_b64decode()' function only accepts bytes)
result = encode2bytes(data['result']['result'])
content = base64.standard_b64decode(result)
return io.BytesIO(content) | [
"Download a report from the server and return it as a remote file.\n For instance, to download the \"Quotation / Order\" report of sale orders\n identified by the IDs ``[2, 3]``:\n\n .. doctest::\n :options: +SKIP\n\n >>> report = odoo.report.download('sale.report_saleorder', [2, 3])\n\n .. doctest::\n :hide:\n\n >>> report = odoo.report.download('sale.report_saleorder', [2])\n\n Write it on the file system:\n\n .. doctest::\n :options: +SKIP\n\n >>> with open('sale_orders.pdf', 'wb') as report_file:\n ... report_file.write(report.read())\n ...\n\n .. doctest::\n :hide:\n\n >>> with open('sale_orders.pdf', 'wb') as report_file:\n ... fileno = report_file.write(report.read()) # Python 3\n ...\n\n *Python 2:*\n\n :return: `io.BytesIO`\n :raise: :class:`odoorpc.error.RPCError` (wrong parameters)\n :raise: `ValueError` (received invalid data)\n :raise: `urllib2.URLError` (connection error)\n\n *Python 3:*\n\n :return: `io.BytesIO`\n :raise: :class:`odoorpc.error.RPCError` (wrong parameters)\n :raise: `ValueError` (received invalid data)\n :raise: `urllib.error.URLError` (connection error)\n "
] |
Please provide a description of the function:def list(self):
report_model = 'ir.actions.report'
if v(self._odoo.version)[0] < 11:
report_model = 'ir.actions.report.xml'
IrReport = self._odoo.env[report_model]
report_ids = IrReport.search([])
reports = IrReport.read(
report_ids, ['name', 'model', 'report_name', 'report_type'])
result = {}
for report in reports:
model = report.pop('model')
report.pop('id')
if model not in result:
result[model] = []
result[model].append(report)
return result | [
"List available reports from the server by returning a dictionary\n with reports classified by data model:\n\n .. doctest::\n :options: +SKIP\n\n >>> odoo.report.list()['account.invoice']\n [{'name': u'Duplicates',\n 'report_name': u'account.account_invoice_report_duplicate_main',\n 'report_type': u'qweb-pdf'},\n {'name': 'Invoices',\n 'report_type': 'qweb-pdf',\n 'report_name': 'account.report_invoice'}]\n\n .. doctest::\n :hide:\n\n >>> from pprint import pprint as pp\n >>> any(data['report_name'] == 'account.report_invoice'\n ... for data in odoo.report.list()['account.invoice'])\n True\n\n *Python 2:*\n\n :return: `list` of dictionaries\n :raise: `urllib2.URLError` (connection error)\n\n *Python 3:*\n\n :return: `list` of dictionaries\n :raise: `urllib.error.URLError` (connection error)\n "
] |
Please provide a description of the function:def _browse(cls, env, ids, from_record=None, iterated=None):
records = cls()
records._env_local = env
records._ids = _normalize_ids(ids)
if iterated:
records._values = iterated._values
records._values_to_write = iterated._values_to_write
else:
records._from_record = from_record
records._values = {}
records._values_to_write = {}
for field in cls._columns:
records._values[field] = {}
records._values_to_write[field] = {}
records._init_values()
return records | [
"Create an instance (a recordset) corresponding to `ids` and\n attached to `env`.\n\n `from_record` parameter is used when the recordset is related to a\n parent record, and as such can take the value of a tuple\n (record, field). This is useful to update the parent record when the\n current recordset is modified.\n\n `iterated` can take the value of an iterated recordset, and no extra\n RPC queries are made to generate the resulting record (recordset and\n its record share the same values).\n "
] |
Please provide a description of the function:def with_context(cls, *args, **kwargs):
context = dict(args[0] if args else cls.env.context, **kwargs)
return cls.with_env(cls.env(context=context)) | [
"Return a model (or recordset) equivalent to the current model\n (or recordset) attached to an environment with another context.\n The context is taken from the current environment or from the\n positional arguments `args` if given, and modified by `kwargs`.\n\n Thus, the following two examples are equivalent:\n\n .. doctest::\n\n >>> Product = odoo.env['product.product']\n >>> Product.with_context(lang='fr_FR')\n Model('product.product')\n\n .. doctest::\n\n >>> context = Product.env.context\n >>> Product.with_context(context, lang='fr_FR')\n Model('product.product')\n\n This method is very convenient for example to search records\n whatever their active status are (active/inactive):\n\n .. doctest::\n\n >>> all_product_ids = Product.with_context(active_test=False).search([])\n\n Or to update translations of a recordset:\n\n .. doctest::\n\n >>> product_en = Product.browse(1)\n >>> product_en.env.lang\n 'en_US'\n >>> product_en.name = \"My product\" # Update the english translation\n >>> product_fr = product_en.with_context(lang='fr_FR')\n >>> product_fr.env.lang\n 'fr_FR'\n >>> product_fr.name = \"Mon produit\" # Update the french translation\n "
] |
Please provide a description of the function:def _with_context(self, *args, **kwargs):
context = dict(args[0] if args else self.env.context, **kwargs)
return self.with_env(self.env(context=context)) | [
"As the `with_context` class method but for recordset."
] |
Please provide a description of the function:def with_env(cls, env):
new_cls = type(cls.__name__, cls.__bases__, dict(cls.__dict__))
new_cls._env = env
return new_cls | [
"Return a model (or recordset) equivalent to the current model\n (or recordset) attached to `env`.\n "
] |
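with_context above is implemented in terms of this method; an equivalent manual call (a sketch reusing the callable-environment pattern from with_context):

Product = odoo.env['product.product']

# Build a new environment carrying a French context, then rebind the model.
ctx = dict(Product.env.context, lang='fr_FR')
Product_fr = Product.with_env(Product.env(context=ctx))
print(Product_fr.env.lang)  # 'fr_FR'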