language
stringclasses
6 values
original_string
stringlengths
25
887k
text
stringlengths
25
887k
Python
def decode_content(self, msg: RawMessage) -> bool:
    """Decode MIB_UPLOAD_NEXT message content.

    Args:
        msg: raw OMCI message

    Returns:
        True if successful
    """
    # BUG FIX: 'ret' was only assigned on the attribute-decoding path, so the
    # request (non-AK) flow raised NameError at 'return ret'. Default to True.
    ret = True
    if self._ak:
        # MIB_UPLOAD_NEXT response - normal flow
        me_class, inst, attr_mask = struct.unpack_from("!HHH", msg, self.content_offset)
        # Create & populate upload_me
        me_class_type = MeClassMapper.me_by_class(me_class)
        if me_class_type is None:
            # This is not an error. There are many ME classes that we don't support
            logger.debug("can't decode message for me_class {}. Skipped".format(me_class))
            self._upload_me = None
            return True
        # Decode attributes (attribute data starts 6 bytes in: class/inst/mask)
        self._upload_me = me_class_type(inst)
        ret = self.decode_attributes(msg, self.content_offset + 6, attr_mask, decode_me=self._upload_me)
        logger.debug("decode: {}{}".format(self._upload_me, ret and ': OK' or ': FAILED'))
    else:
        # MIB_UPLOAD_NEXT request - debug flow
        self._sequence_num = struct.unpack_from("!H", msg, self.content_offset)[0]
    return ret
Python
def reinit(self):
    """Re-initialize the action so the object can be reused for another exchange."""
    self._ar = self._ar or self._ak
    self._ak = False
    self._content_len = 0  # will be calculated for extended messages
    # Extended OMCI messages allow up to 2044 bytes; baseline messages 44
    self._total_len = 2044 if self._extended else 44
Python
def rollback(self, onu: 'OnuDriver') -> 'OmciAction':
    """Build an action that undoes this one.

    Args:
        onu: OnuDriver containing the current ONU MIB

    Returns:
        The roll-back action, or None when rolling back is not applicable
        (the base implementation has nothing to undo).
    """
    return None
Python
def commit(self, onu: 'OnuDriver'):
    """Commit action results to the ONU MIB.

    The base implementation is a no-op; subclasses override it.

    Args:
        onu: OnuDriver containing the current ONU MIB

    Raises:
        Exception: in case of commit failure (never raised by the base class).
    """
    pass
Python
def decode_key_fields(msg: RawMessage) -> Tuple[int, int, int]:
    """Decode the key fields of a raw OMCI message header.

    Args:
        msg: raw OMCI message

    Returns:
        (tci, ak, action) tuple
    """
    tci, mt, dev = struct.unpack_from('!HBB', msg, 0)
    ak = bool((mt >> 5) & 0x1)   # AK bit: set when the message is an acknowledgement
    action = mt & 0x1f           # message type (action) code
    # NOTE: the original also computed content_offset/content_length from the
    # 'dev' (device identifier) byte, but the values were dead locals - removed.
    return tci, ak, action
Python
def next_tci(self) -> int:
    """Return the next TCI value to be transmitted.

    TCI values wrap around from 0x7fff back to 1 (0 is never produced),
    under the instance lock for thread safety.
    """
    with self._lock:
        # Conditional expression instead of the error-prone 'and/or' idiom
        self._tci = 1 if self._tci == 0x7fff else self._tci + 1
        tci = self._tci
    return tci
Python
def ack_timeout(self) -> float:
    """Response timeout in seconds.

    The OMH handler waits this long for a response before retransmitting
    or reporting a TIMEOUT failure.
    """
    return self._ack_timeout
Python
def recv(self, msg: RawMessage): """ Message received from ONU. This function is called by communication layer. Args: msg - Raw OMCI message """ # Peek in the common OMCI message header tci, ak, action = OmciAction.decode_key_fields(msg) # If this is a response, identify the requester. # Otherwise, deliver to the registered subscriber for the action if ak: sender = None with self._lock: for _tci, _sender in self._pending_requests: if _tci == tci: self._pending_requests.remove((_tci, _sender)) sender = _sender break if sender: sender.recv(msg) else: logger.warning("recv: unexpected ACK with TCI {} from ONU {} discarded.".format(tci, self._onu_id)) else: # Not a response. Identify message handler by message type if action not in OnuDriver._message_handler_by_mt: logger.error("recv: Don't know how to handle MT {} from ONU {}. Message discarded.".format( action, self._onu_id)) return OnuDriver._message_handler_by_mt[action].recv(self, msg)
def recv(self, msg: RawMessage): """ Message received from ONU. This function is called by communication layer. Args: msg - Raw OMCI message """ # Peek in the common OMCI message header tci, ak, action = OmciAction.decode_key_fields(msg) # If this is a response, identify the requester. # Otherwise, deliver to the registered subscriber for the action if ak: sender = None with self._lock: for _tci, _sender in self._pending_requests: if _tci == tci: self._pending_requests.remove((_tci, _sender)) sender = _sender break if sender: sender.recv(msg) else: logger.warning("recv: unexpected ACK with TCI {} from ONU {} discarded.".format(tci, self._onu_id)) else: # Not a response. Identify message handler by message type if action not in OnuDriver._message_handler_by_mt: logger.error("recv: Don't know how to handle MT {} from ONU {}. Message discarded.".format( action, self._onu_id)) return OnuDriver._message_handler_by_mt[action].recv(self, msg)
Python
def dump_mib(self):
    """Print the contents of the main MIB to stdout, one ME instance per line."""
    print('=== Start of MIB dump for ONU {} ==='.format(self._onu_id))
    for me_class in self.get_all_me_classes():
        for me in self.get_all_instances(me_class):
            print('{}'.format(me))
    print('=== End of MIB dump for ONU {} ==='.format(self._onu_id))
Python
def tc_set(self, pbit: int, tc: int):
    """Set the Traffic Class (TC) value for a pbit.

    Args:
        pbit: priority bit, 0..7
        tc: traffic class, 0..7

    Raises:
        ValueError: if pbit or tc is outside the 0..7 range.
    """
    # Validate with real exceptions: 'assert' is stripped when run with -O.
    if not 0 <= pbit <= 7:
        raise ValueError('pbit must be in 0..7, got {}'.format(pbit))
    if not 0 <= tc <= 7:
        raise ValueError('tc must be in 0..7, got {}'.format(tc))
    self._pbit_to_tc_map[pbit] = tc
Python
def pbits_by_tc(self, tc: int) -> Tuple[int, ...]:
    """Get all pbits that map to the specified traffic class.

    Args:
        tc: traffic class to look up

    Returns:
        Tuple of pbit values mapping to tc (possibly empty).
    """
    # enumerate + generator expression replaces the range(len(...))/append loop
    return tuple(pbit for pbit, mapped_tc in enumerate(self._pbit_to_tc_map)
                 if mapped_tc == tc)
Python
def recv(self, vomci_msg : tr451_vomci_sbi_message_pb2):
    """ Message received via the channel.

    Hand it over to the proxy (self._parent) for forwarding.

    Args:
        vomci_msg: received TR-451 vOMCI SBI protobuf message
    """
    logger.info("ProxyGrpcClient: Received message")
    self._parent.recv(self, vomci_msg)
Python
def create_8021p_svc_mapper(handler:OmhHandler, name: str) -> OMHStatus:
    """ Create 802.1p Service Mapper

    Args:
        handler: OMH handler that requested this service
        name: QoS profile name
    Returns: completion status
    """
    # Allocate the next free instance id (instance ids are returned in
    # ascending order, so last + 1 is free; start from 1 when none exist)
    all_instance_ids = handler._onu.get_all_instance_ids(omci_me_class['IEEE_8021_P_MAPPER_SVC_PROF'])
    inst_id = len(all_instance_ids) > 0 and all_instance_ids[-1] + 1 or 1
    # All per-priority interworking TP pointers start as NULL;
    # they are plugged in later when GEM ports are attached
    profile_me = ieee_8021_p_mapper_svc_prof_me(
        inst_id, tp_ptr=OMCI_NULL_PTR,
        interwork_tp_ptr_pri_0=OMCI_NULL_PTR, interwork_tp_ptr_pri_1=OMCI_NULL_PTR,
        interwork_tp_ptr_pri_2=OMCI_NULL_PTR, interwork_tp_ptr_pri_3=OMCI_NULL_PTR,
        interwork_tp_ptr_pri_4=OMCI_NULL_PTR, interwork_tp_ptr_pri_5=OMCI_NULL_PTR,
        interwork_tp_ptr_pri_6=OMCI_NULL_PTR, interwork_tp_ptr_pri_7=OMCI_NULL_PTR,
        unmarked_frame_opt='DERIVE_IMPLIED_PCP',
        mapper_tp_type='BRIDGING_MAPPING')
    profile_me.user_name = name
    if handler.transaction(CreateAction(handler, profile_me)) != OMHStatus.OK:
        return handler.logerr_and_return(
            handler._transaction_status,
            'Create IEEE 802.1p Mapper SVC Profile ME {}'.format(name))
    return OMHStatus.OK
Python
def create_mac_bridge_port(handler: OmhHandler, tp: ME) -> OMHStatus:
    """Create MAC Bridge Port Config Data for an interface.

    Args:
        handler: OMH handler that requested this service
        tp: Bridge port Termination Point

    Returns:
        completion status
    """
    # Get mac_bridge_svc_prof ME
    mac_bridge_svc_prof = handler._onu.get_first(omci_me_class['MAC_BRIDGE_SVC_PROF'])
    if mac_bridge_svc_prof is None:
        return handler.logerr_and_return(OMHStatus.INTERNAL_ERROR,
                                         "MAC Bridge Service Profile ME doesn't exist")
    # Allocate the next free bridge port number (ids are in ascending order)
    all_bridge_ports = handler._onu.get_all_instance_ids(omci_me_class['MAC_BRIDGE_PORT_CONFIG_DATA'])
    port_num = len(all_bridge_ports) > 0 and all_bridge_ports[-1] + 1 or 1
    mac_bridge_port = mac_bridge_port_config_data_me(
        inst=port_num, bridge_id_ptr=mac_bridge_svc_prof.inst,
        port_num=port_num, tp_ptr=tp.inst)
    # TP type is derived from the concrete type of the termination point ME
    if type(tp) is pptp_eth_uni_me:
        mac_bridge_port.tp_type = 'PHY_PATH_TP_ETH_UNI'
    elif type(tp) is virtual_eth_intf_point_me:
        mac_bridge_port.tp_type = 'VIRTUAL_ETH_INTERFACE_POINT'
    elif type(tp) is ieee_8021_p_mapper_svc_prof_me:
        mac_bridge_port.tp_type = 'IEEE_8021_P_MAPPER_SVC_PROF'
    else:
        return OMHStatus.NOT_SUPPORTED
    status = handler.transaction(CreateAction(handler, mac_bridge_port))
    if status == OMHStatus.OK:
        tp.set_user_attr('bridge_port', mac_bridge_port.inst)
    # BUG FIX: propagate the transaction status. The original returned
    # OMHStatus.OK unconditionally, hiding Create failures from the caller.
    return status
Python
def create_ext_vlan_tag_oper_config_data(handler: OmhHandler, iface: ME,
                                         input_tpid: int = 0, output_tpid: int = 0) -> OMHStatus:
    """Create Extended VLAN Tagging Operation Configuration Data for an interface.

    (The previous docstring said "MAC Bridge Port Config Data" - a copy-paste leftover.)

    Requires user attribute iface.bridge_port_num to be set

    Args:
        handler: OMH handler that requested this service
        iface: interface ME the ext-VLAN-tag ME is associated with
        input_tpid: ingress TPID; 0 selects the default 0x8100
        output_tpid: egress TPID; 0 selects the default 0x8100
    Returns: completion status
    """
    # Allocate the next free instance id
    all_instances = handler._onu.get_all_instance_ids(omci_me_class['EXT_VLAN_TAG_OPER_CONFIG_DATA'])
    inst = len(all_instances) > 0 and all_instances[-1] + 1 or 1
    ext_vlan_tag = ext_vlan_tag_oper_config_data_me(inst)
    # Association type depends on the concrete type of the interface ME
    if type(iface) is pptp_eth_uni_me:
        ext_vlan_tag.assoc_type = 'PPTP_ETH_UNI'
    elif type(iface) is virtual_eth_intf_point_me:
        ext_vlan_tag.assoc_type = 'VEIP'
    elif type(iface) is mac_bridge_port_config_data_me:
        ext_vlan_tag.assoc_type = 'MAC_BRIDGE_PORT_CFG_DATA'
    else:
        return OMHStatus.NOT_SUPPORTED
    ext_vlan_tag.assoc_me_ptr = iface.inst
    status = handler.transaction(CreateAction(handler, ext_vlan_tag))
    if status != OMHStatus.OK:
        return status
    # Now set attributes that can't be set by 'create' action
    ext_vlan_tag.input_tpid = input_tpid != 0 and input_tpid or 0x8100
    ext_vlan_tag.output_tpid = output_tpid != 0 and output_tpid or 0x8100
    # XXX TODO: need to support additional downstream_mode values?
    ext_vlan_tag.ds_mode = 'US_INVERSE'
    status = handler.transaction(SetAction(handler, ext_vlan_tag, ('input_tpid', 'output_tpid', 'ds_mode')))
    if status != OMHStatus.OK:
        return status
    # Remember the ext-vlan-tag instance on the interface for later lookup
    iface.set_user_attr('ext_vlan_tag_op', inst)
    return OMHStatus.OK
Python
def create_vlan_tagging_filter_data(handler: OmhHandler, inst: int, name: str, classifier: PacketClassifier, vlan_action: VlanAction) -> OMHStatus: """ Create and configure VLAN Tagging Filter Data ME Please note that vlan tagging filter is applied BEFORE vlan_action in the ingress and AFTER vlan_action in the egress. Therefore, need to take 'action' into account. Args: handler: OMH handler that requested this service inst: instance id. Must be equal to the instance of the associated MAC Bridge Port Data ME classifier: Packet classifier vlan_action: Packet VLAN action Returns: completion status """ # XXX: TODO: add support for multiple VLAN classifier in a sub-interface if vlan_action is not None: if vlan_action.action == VlanAction.Action.PUSH or vlan_action.action == VlanAction.Action.TRANSLATE: o_vid = vlan_action.o_vid elif vlan_action.action == VlanAction.Action.POP: o_vid = classifier is None and None or classifier.field('i_vid') else: return handler.logerr_and_return(OMHStatus.NOT_SUPPORTED, "VLAN action {} is not supported".format(vlan_action.action)) else: o_vid = classifier is None and None or classifier.field('o_vid') tcid = 0 if o_vid is None: action = 'TAGGED_BRIDGING_A_NO_INVESTIGATION_UNTAGGED_BRIDGING_A' elif o_vid.pbit != PBIT_VALUE_ANY: tcid |= (o_vid.pbit << 13) if o_vid.vid != VID_VALUE_ANY: action = 'TAGGED_ACTION_H_TCI_INVESTIGATION_UNTAGGED_DISCARDING_C_DUP' tcid |= o_vid.vid else: action = 'TAGGED_ACTION_H_PRI_INVESTIGATION_UNTAGGED_DISCARDING_C_DUP' elif o_vid.vid != VID_VALUE_ANY: # Classify by VID only action = 'TAGGED_ACTION_H_VID_INVESTIGATION_UNTAGGED_DISCARDING_C_DUP' tcid |= o_vid.vid vlan_filter = bytearray(2) vlan_filter[0] = tcid >> 8 vlan_filter[1] = tcid & 0xff tag_filter_me = vlan_tag_filter_data_me(inst, vlan_filter_list=vlan_filter, forward_oper=action, num_of_entries=1) tag_filter_me.user_name = name return handler.transaction(CreateAction(handler, tag_filter_me))
def create_vlan_tagging_filter_data(handler: OmhHandler, inst: int, name: str, classifier: PacketClassifier, vlan_action: VlanAction) -> OMHStatus: """ Create and configure VLAN Tagging Filter Data ME Please note that vlan tagging filter is applied BEFORE vlan_action in the ingress and AFTER vlan_action in the egress. Therefore, need to take 'action' into account. Args: handler: OMH handler that requested this service inst: instance id. Must be equal to the instance of the associated MAC Bridge Port Data ME classifier: Packet classifier vlan_action: Packet VLAN action Returns: completion status """ # XXX: TODO: add support for multiple VLAN classifier in a sub-interface if vlan_action is not None: if vlan_action.action == VlanAction.Action.PUSH or vlan_action.action == VlanAction.Action.TRANSLATE: o_vid = vlan_action.o_vid elif vlan_action.action == VlanAction.Action.POP: o_vid = classifier is None and None or classifier.field('i_vid') else: return handler.logerr_and_return(OMHStatus.NOT_SUPPORTED, "VLAN action {} is not supported".format(vlan_action.action)) else: o_vid = classifier is None and None or classifier.field('o_vid') tcid = 0 if o_vid is None: action = 'TAGGED_BRIDGING_A_NO_INVESTIGATION_UNTAGGED_BRIDGING_A' elif o_vid.pbit != PBIT_VALUE_ANY: tcid |= (o_vid.pbit << 13) if o_vid.vid != VID_VALUE_ANY: action = 'TAGGED_ACTION_H_TCI_INVESTIGATION_UNTAGGED_DISCARDING_C_DUP' tcid |= o_vid.vid else: action = 'TAGGED_ACTION_H_PRI_INVESTIGATION_UNTAGGED_DISCARDING_C_DUP' elif o_vid.vid != VID_VALUE_ANY: # Classify by VID only action = 'TAGGED_ACTION_H_VID_INVESTIGATION_UNTAGGED_DISCARDING_C_DUP' tcid |= o_vid.vid vlan_filter = bytearray(2) vlan_filter[0] = tcid >> 8 vlan_filter[1] = tcid & 0xff tag_filter_me = vlan_tag_filter_data_me(inst, vlan_filter_list=vlan_filter, forward_oper=action, num_of_entries=1) tag_filter_me.user_name = name return handler.transaction(CreateAction(handler, tag_filter_me))
Python
def _set_8021p_mapper(handler: OmhHandler, mapper_me: ieee_8021_p_mapper_svc_prof_me,
                      pbit_to_tc: 'PbitToTCMapper', uni_name: str, pbit: int) -> (OMHStatus, bool):
    """ Set up 802_1p mapper ME for a specified priority and all other
    priorities that map to the same traffic class

    Args:
        handler: OMH handler that requested this service
        mapper_me: 802.1p mapper service profile ME to update
        pbit_to_tc: pbit-to-traffic-class mapper
        uni_name: UNI name used to look up the GEM port
        pbit: priority bit to set up

    Returns:
        (status, changed) - changed is False when the pbit has no TC mapping
        or no GEM port exists for the (UNI, TC) pair.
    """
    tc = pbit_to_tc.tc(pbit)
    if tc is None:
        # pbit is not mapped to any traffic class - nothing to do
        return OMHStatus.OK, False
    # Find a GEM port by UNI and TC
    gem = get_gem_port_by_uni_tc(handler.onu, uni_name, tc)
    if gem is None:
        return OMHStatus.OK, False
    # Find GAL ethernet profile
    gal_eth_prof = handler.onu.get_by_name('gal_eth_prof')
    if gal_eth_prof is None:
        logger.error("set_8021p_mapper: GAL Ethernet Profile is not found")
        return OMHStatus.INTERNAL_ERROR, False
    # Create GEM_IW_TP with the next free instance id
    all_gem_iw_tp_inst = handler.onu.get_all_instance_ids(omci_me_class['GEM_IW_TP'])
    gem_iw_tp_inst = len(all_gem_iw_tp_inst) > 0 and all_gem_iw_tp_inst[-1] + 1 or 1
    gem_iw_tp = gem_iw_tp_me(
        gem_iw_tp_inst,
        gem_port_net_ctp_conn_ptr = gem.inst,
        iw_opt = 'IEEE_8021_P_MAPPER',
        svc_prof_ptr = mapper_me.inst,
        iw_tp_ptr = OMCI_NULL_PTR,
        gal_prof_ptr = gal_eth_prof.inst
    )
    status = handler.transaction(CreateAction(handler, gem_iw_tp))
    if status != OMHStatus.OK:
        return status, False
    # Plug GEM IW_TP into all priorities that map to the same tc
    # (note: the loop variable below intentionally shadows the 'pbit' parameter)
    pbits_by_tc = pbit_to_tc.pbits_by_tc(tc)
    for pbit in pbits_by_tc:
        _set_8021p_priority(mapper_me, pbit, gem_iw_tp.inst)
    return OMHStatus.OK, True
Python
def encode_content(self) -> RawMessage:
    """Encode CREATE request message content.

    Returns:
        raw OMCI message content (empty - this request carries no
        additional payload beyond the common header)
    """
    return bytearray()
Python
def decode_content(self, msg: RawMessage) -> bool:
    """Decode CREATE response message content.

    Returns:
        result : True if successful
    """
    # NOTE(review): the comment below says MIB_RESET while the docstring says
    # CREATE - one of the two is likely a copy-paste leftover; confirm which
    # message type this action actually handles.
    if self._ak:
        # MIB_RESET response - normal flow
        self._omci_result = struct.unpack_from("!H", msg, self.content_offset)[0]
    return True
Python
def commit(self, onu: 'OnuDriver'):
    """Commit the delete-action result to the ONU MIB.

    Args:
        onu: OnuDriver containing the current ONU MIB

    Raises:
        Exception: if the ME could not be removed from the local MIB.
    """
    deleted = onu.delete(self._me.me_class, self._me.inst)
    if not deleted:
        raise Exception('{} - failed to commit to the local MIB'.format(self.name))
Python
def _init_and_connect(self) -> OnuDriver: """ Initialize and connect Args: args : command line arguments Returns: OnuDriver or None of connection failed """ # Create gRPC channel and try to connect if (self._args.server_mode): logger.info("Test {}: waiting for connection on port {}:{}..". format(self._name, self._args.polt_host, self._args.port)) self._server = GrpcServer(port=self._args.port, name=self._args.vomci_name) connections = self._server.connections() while len(connections.values()) == 0: time.sleep(1) connection = list(connections.values())[0]; connection.connected(connection.remote_endpoint_name) olt = OltDatabase.OltGetFirst() # Add ONU to the database onu = olt.OnuAdd(self._args.onu_name, (self._args.cterm_name, self._args.onu_id), tci=self._args.tci) else: logger.info("Test {}: connecting to {}:{}..".format(self._name, self._args.polt_host, self._args.port)) channel = GrpcClientChannel(name=self._args.vomci_name) ret_val = channel.connect(host=self._args.polt_host, port=self._args.port) if not ret_val: logger.warning("Test {}: connection failed".format(self._name)) return None olt = channel.add_managed_onu(channel.remote_endpoint_name, self._args.onu_name, (self._args.cterm_name, self._args.onu_id), tci=self._args.tci) logger.info("test_YangtoOmciMapper: connected to the pOLT: {}".format(olt.id)) onu = olt.OnuGet((self._args.cterm_name, self._args.onu_id)) logger.info("Test {}: connected to {}".format(self._name, olt.id)) return onu
def _init_and_connect(self) -> OnuDriver: """ Initialize and connect Args: args : command line arguments Returns: OnuDriver or None of connection failed """ # Create gRPC channel and try to connect if (self._args.server_mode): logger.info("Test {}: waiting for connection on port {}:{}..". format(self._name, self._args.polt_host, self._args.port)) self._server = GrpcServer(port=self._args.port, name=self._args.vomci_name) connections = self._server.connections() while len(connections.values()) == 0: time.sleep(1) connection = list(connections.values())[0]; connection.connected(connection.remote_endpoint_name) olt = OltDatabase.OltGetFirst() # Add ONU to the database onu = olt.OnuAdd(self._args.onu_name, (self._args.cterm_name, self._args.onu_id), tci=self._args.tci) else: logger.info("Test {}: connecting to {}:{}..".format(self._name, self._args.polt_host, self._args.port)) channel = GrpcClientChannel(name=self._args.vomci_name) ret_val = channel.connect(host=self._args.polt_host, port=self._args.port) if not ret_val: logger.warning("Test {}: connection failed".format(self._name)) return None olt = channel.add_managed_onu(channel.remote_endpoint_name, self._args.onu_name, (self._args.cterm_name, self._args.onu_id), tci=self._args.tci) logger.info("test_YangtoOmciMapper: connected to the pOLT: {}".format(olt.id)) onu = olt.OnuGet((self._args.cterm_name, self._args.onu_id)) logger.info("Test {}: connected to {}".format(self._name, olt.id)) return onu
Python
def run(self, extra_args : Optional[Any] = None) -> int:
    """ Run an OMH handler or a list of OMH handlers --iter number of times

    Args:
        extra_args: optional extra arguments forwarded to the handler(s)
    Returns:
        accumulated status value (negative on setup failure)
    """
    if self._args is None:
        self.parse_arguments()
    if self._handler_type is None:
        logger.error('handler type must be set')
        return -1
    # Normalize a single handler type to a 1-tuple (and wrap extra_args to match)
    if not isinstance(self._handler_type, tuple) and not isinstance(self._handler_type, list):
        self._handler_type = (self._handler_type,)
        extra_args = (extra_args, )
    self._onu = self._init_and_connect()
    if self._onu is None:
        return -1
    self._onu.set_flow_control(max_retries=self._args.retries, ack_timeout=self._args.timeout)
    # Run the sequence the required number of iterations
    for iter in range(self._args.iters):
        if self._args.iters > 1:
            logger.info("Test {}: iteration {}".format(self._name, iter + 1))
            if iter > 0:
                input("Press Enter to continue...")
        handler = TestOmhDriver.TestHandler(self._onu, self._handler_type, extra_args)
        if self._args.background:
            # Background mode: start the handler and block on the semaphore,
            # which is released by self._handler_completed
            logger.info("Test {}: starting execution in the background".format(self._name))
            handler.start(self._handler_completed)
            logger.info("Test {}: Waiting for completion..".format(handler._name))
            self._sem.acquire()
        else:
            logger.info("Test {}: starting execution in the foreground".format(self._name))
            handler.run()
        logger.info("Test {}: Finished execution. Status: {}".format(self._name, handler.status.name))
        self._update_status(handler.status)
        # Stop iterating on the first failure
        if handler.status != OMHStatus.OK:
            break
    # Teardown: disconnect channel / stop server, optionally dump the MIB
    if self._onu.olt.channel is not None:
        self._onu.olt.channel.disconnect()
    if self._server is not None:
        self._server.stop()
    if self._args.dump_mib:
        self._onu.dump_mib()
    return self._status.value
Python
def raw_value(self, value: AttrValue) -> AttrRawValue:
    """Convert data item user value to raw value.

    Args:
        value: external attribute value

    Returns:
        raw (internal) attribute value

    Raises:
        NotImplementedError: always; concrete subclasses must override.
    """
    # NotImplementedError is the idiomatic exception for an abstract hook;
    # it subclasses RuntimeError/Exception, so existing handlers still catch it.
    raise NotImplementedError('Unimplemented raw_value() method')
Python
def value(self, raw_value: AttrRawValue) -> AttrValue:
    """Convert data item raw value to user value.

    Args:
        raw_value: raw (internal) attribute value

    Returns:
        External attribute value

    Raises:
        NotImplementedError: always; concrete subclasses must override.
    """
    # NotImplementedError is the idiomatic exception for an abstract hook;
    # it subclasses RuntimeError/Exception, so existing handlers still catch it.
    raise NotImplementedError('Unimplemented value() method')
Python
def add_managed_onus(self, comm_channel): """ Add managed onus to communication channel, as per configuration. This function should be called by the GrpcServer when a new client connection is initiated. """ for managed_onu in ManagementChain.GetManagedOnus(): if managed_onu.downstream_endpoint_name == comm_channel.remote_endpoint_name: onu_id = (managed_onu.ct_ref, managed_onu.onu_id) comm_channel.add_managed_onu(managed_onu.olt_name, managed_onu.onu_name, onu_id)
def add_managed_onus(self, comm_channel):
    """ Add managed onus to communication channel, as per configuration.

    This function should be called by the GrpcServer when a new client
    connection is initiated.
    """
    endpoint = comm_channel.remote_endpoint_name
    for onu in ManagementChain.GetManagedOnus():
        if onu.downstream_endpoint_name != endpoint:
            continue
        comm_channel.add_managed_onu(onu.olt_name, onu.onu_name, (onu.ct_ref, onu.onu_id))
Python
def trigger_create_onu(self, onu_name) -> None:
    """ Add ONU with onu_name to management chain.

    To be called by the kafka interface when a "create ONU" request is received.
    The outcome is reported on the kafka interface; nothing is returned.

    Args:
        onu_name: unique name of ONU
    """
    # Reject duplicates - the ONU must not already be in the management chain
    if ManagementChain.GetOnu(onu_name) is not None:
        er_string = "ONU {} already exists in the management chain".format(onu_name)
        logger.error(er_string)
        self._kafka_if.send_unsuccessful_response(onu_name, error_msg=er_string)
        return
    ManagementChain.CreateOnu(onu_name)
    logger.info("Onu {} was created in vOMCi".format(onu_name))
    self._kafka_if.send_successful_response(onu_name)
def trigger_create_onu(self, onu_name) -> None:
    """ Add ONU with onu_name to management chain.

    To be called by the kafka interface when a "create ONU" request is received.
    The outcome is reported on the kafka interface; nothing is returned.

    Args:
        onu_name: unique name of ONU
    """
    # Reject duplicates - the ONU must not already be in the management chain
    if ManagementChain.GetOnu(onu_name) is not None:
        er_string = "ONU {} already exists in the management chain".format(onu_name)
        logger.error(er_string)
        self._kafka_if.send_unsuccessful_response(onu_name, error_msg=er_string)
        return
    ManagementChain.CreateOnu(onu_name)
    logger.info("Onu {} was created in vOMCi".format(onu_name))
    self._kafka_if.send_successful_response(onu_name)
Python
def trigger_set_onu_communication(self, olt_name: str, onu_name: str,
                                  channel_termination: str, onu_tc_id: int,
                                  available: bool, olt_endpoint_name: str,
                                  voltmf_endpoint_name: str, voltmf_name: str):
    """ Use arguments to set/update the communication points and management
    chain of given ONU. Then initiate the ONU detect sequence.

    To be called by the kafka interface when a "set ONU communication"
    request is received. The outcome is reported on the kafka interface.
    """
    managed_onu = ManagementChain.SetOnuCommunication(olt_name, onu_name, channel_termination,
                                                      onu_tc_id, available, olt_endpoint_name,
                                                      voltmf_endpoint_name, voltmf_name)
    # Assign channel if communication is available.
    # BUG FIX: the original loop reused 'channel' as the loop variable, so a
    # full scan without a match left it bound to the *last* connection instead
    # of None. A separate loop variable keeps 'channel' None when no
    # connection matches the downstream endpoint.
    channel = None
    for conn in self._server.connections().values():
        if managed_onu.downstream_endpoint_name == conn.remote_endpoint_name:
            onu_id = (managed_onu.ct_ref, managed_onu.onu_id)
            conn.add_managed_onu(managed_onu.olt_name, onu_name, onu_id)
            channel = conn
            break
    managed_onu.SetDsChannel(channel)
    if available:
        if channel is None:
            error_msg = "ONU {}: can't enable communication. remote-endpoint {} is not connected".format(
                onu_name, managed_onu.downstream_endpoint_name)
            logger.error(error_msg)
            self._kafka_if.send_unsuccessful_response(onu_name, error_msg=error_msg)
            # NOTE(review): the original fell through to trigger_onu_mib_sync()
            # even after reporting failure; that looks unintended, so we stop
            # here - confirm against the caller's expectations
            return
        self.trigger_onu_mib_sync(olt_name, olt_endpoint_name, onu_name, channel_termination, onu_tc_id)
    else:
        self._kafka_if.send_successful_response(onu_name)
def trigger_set_onu_communication(self, olt_name: str, onu_name: str,
                                  channel_termination: str, onu_tc_id: int,
                                  available: bool, olt_endpoint_name: str,
                                  voltmf_endpoint_name: str, voltmf_name: str):
    """ Use arguments to set/update the communication points and management
    chain of given ONU. Then initiate the ONU detect sequence.

    To be called by the kafka interface when a "set ONU communication"
    request is received. The outcome is reported on the kafka interface.
    """
    managed_onu = ManagementChain.SetOnuCommunication(olt_name, onu_name, channel_termination,
                                                      onu_tc_id, available, olt_endpoint_name,
                                                      voltmf_endpoint_name, voltmf_name)
    # Assign channel if communication is available.
    # BUG FIX: the original loop reused 'channel' as the loop variable, so a
    # full scan without a match left it bound to the *last* connection instead
    # of None. A separate loop variable keeps 'channel' None when no
    # connection matches the downstream endpoint.
    channel = None
    for conn in self._server.connections().values():
        if managed_onu.downstream_endpoint_name == conn.remote_endpoint_name:
            onu_id = (managed_onu.ct_ref, managed_onu.onu_id)
            conn.add_managed_onu(managed_onu.olt_name, onu_name, onu_id)
            channel = conn
            break
    managed_onu.SetDsChannel(channel)
    if available:
        if channel is None:
            error_msg = "ONU {}: can't enable communication. remote-endpoint {} is not connected".format(
                onu_name, managed_onu.downstream_endpoint_name)
            logger.error(error_msg)
            self._kafka_if.send_unsuccessful_response(onu_name, error_msg=error_msg)
            # NOTE(review): the original fell through to trigger_onu_mib_sync()
            # even after reporting failure; that looks unintended, so we stop
            # here - confirm against the caller's expectations
            return
        self.trigger_onu_mib_sync(olt_name, olt_endpoint_name, onu_name, channel_termination, onu_tc_id)
    else:
        self._kafka_if.send_successful_response(onu_name)
Python
def merge(self, candidate: 'OnuMib', me_class: int):
    """ Merge values from candidate into this MIB.

    Args:
        candidate: MIB to merge from
        me_class: restrict merging to this ME class; None merges all classes
    """
    if me_class is not None:
        if me_class in candidate._per_class_dict:
            self._merge_me_class(candidate._per_class_dict[me_class])
        return
    # No class filter - merge every class present in the candidate
    for class_entry in candidate._per_class_dict.values():
        self._merge_me_class(class_entry)
def merge(self, candidate: 'OnuMib', me_class: int):
    """ Merge values from candidate into this MIB.

    Args:
        candidate: MIB to merge from
        me_class: restrict merging to this ME class; None merges all classes
    """
    if me_class is not None:
        if me_class in candidate._per_class_dict:
            self._merge_me_class(candidate._per_class_dict[me_class])
        return
    # No class filter - merge every class present in the candidate
    for class_entry in candidate._per_class_dict.values():
        self._merge_me_class(class_entry)
Python
def field(self, name: str) -> Any:
    """ Get classification field value, or None when the field is absent """
    return self._fields.get(name)
def field(self, name: str) -> Any:
    """ Get classification field value, or None when the field is absent """
    return self._fields.get(name)
Python
def _get_ip_host_config_by_index(self, index: int) -> ME:
    """ Get IP host config ME by index.

    Returns:
        IP_HOST_CONFIG ME or None if not found
    """
    instances = self._onu.get_all_instances(omci_me_class['IP_HOST_CONFIG_DATA'])
    if instances is not None and index < len(instances):
        return instances[index]
    return None
def _get_ip_host_config_by_index(self, index: int) -> ME:
    """ Get IP host config ME by index.

    Returns:
        IP_HOST_CONFIG ME or None if not found
    """
    instances = self._onu.get_all_instances(omci_me_class['IP_HOST_CONFIG_DATA'])
    if instances is not None and index < len(instances):
        return instances[index]
    return None
Python
def encode_content(self) -> RawMessage:
    """Encode SET request message content.

    Returns:
        raw OMCI message content
    """
    if not self._ak:
        # SET request - normal flow: 2-byte attribute mask followed by the
        # encoded attribute values
        msg = struct.pack("!H", self._attr_mask)
        # NOTE(review): encode_attributes() is handed the partial message,
        # presumably so it can account for the bytes already written; confirm
        # it returns only the attribute bytes to append
        msg += self.encode_attributes(msg)
    else:
        # SET response - mainly for debugging: result code, optional-attribute
        # mask, attribute-execution mask
        msg = struct.pack("!HHH", self._omci_result, self._opt_attrs_mask, self._attr_exec_mask)
    return msg
def encode_content(self) -> RawMessage:
    """Encode SET request message content.

    Returns:
        raw OMCI message content
    """
    if not self._ak:
        # SET request - normal flow: 2-byte attribute mask followed by the
        # encoded attribute values
        msg = struct.pack("!H", self._attr_mask)
        # NOTE(review): encode_attributes() is handed the partial message,
        # presumably so it can account for the bytes already written; confirm
        # it returns only the attribute bytes to append
        msg += self.encode_attributes(msg)
    else:
        # SET response - mainly for debugging: result code, optional-attribute
        # mask, attribute-execution mask
        msg = struct.pack("!HHH", self._omci_result, self._opt_attrs_mask, self._attr_exec_mask)
    return msg
Python
def commit(self, onu: 'OnuDriver'):
    """ Commit action results top ONU MIB.

    Args:
        onu : OnuDriver containing the current ONU MIB
    Raises:
        an exception in case of commit failure
    """
    committed = onu.set(self._me)
    if not committed:
        raise Exception('{} - failed to commit to the local MIB'.format(self.name))
def commit(self, onu: 'OnuDriver'):
    """ Commit action results top ONU MIB.

    Args:
        onu : OnuDriver containing the current ONU MIB
    Raises:
        an exception in case of commit failure
    """
    committed = onu.set(self._me)
    if not committed:
        raise Exception('{} - failed to commit to the local MIB'.format(self.name))
Python
def rollback(self, onu: 'OnuDriver') -> 'OmciAction':
    """ Create a roll-back action.

    Args:
        onu : OnuDriver containing the current ONU MIB
    Returns:
        An action that rolls-back 'this' action, or None if not applicable
    """
    # If the action failed outright there is nothing to roll back
    if self._omci_result not in (0, omci_status['ATTRIBUTES_FAILED_OR_UNKNOWN']):
        return None
    # Lookup ME in the ONU MIB. It is OK if it fails. It might've been set
    # after CREATE. In this case the CREATE's rollback will take care of it
    previous = onu.get(self._me_class, self._inst, log_error=False)
    return previous and SetAction(self._owner, previous, extended=self._extended) or None
def rollback(self, onu: 'OnuDriver') -> 'OmciAction':
    """ Create a roll-back action.

    Args:
        onu : OnuDriver containing the current ONU MIB
    Returns:
        An action that rolls-back 'this' action, or None if not applicable
    """
    # If the action failed outright there is nothing to roll back
    if self._omci_result not in (0, omci_status['ATTRIBUTES_FAILED_OR_UNKNOWN']):
        return None
    # Lookup ME in the ONU MIB. It is OK if it fails. It might've been set
    # after CREATE. In this case the CREATE's rollback will take care of it
    previous = onu.get(self._me_class, self._inst, log_error=False)
    return previous and SetAction(self._owner, previous, extended=self._extended) or None
Python
def user_attr(self, name: str):
    """ Retrieve user-defined attribute stored in ME instance.

    Args:
        name: user attribute name
    Returns:
        user attribute value or None if not found
    """
    data = self._user_data
    if data is None:
        return None
    return data.get(name)
def user_attr(self, name: str):
    """ Retrieve user-defined attribute stored in ME instance.

    Args:
        name: user attribute name
    Returns:
        user attribute value or None if not found
    """
    data = self._user_data
    if data is None:
        return None
    return data.get(name)
Python
def clear_user_attr(self, name: str):
    """ Clear user-defined attribute stored in ME instance.

    Silently does nothing when the attribute is absent.

    Args:
        name: user attribute name
    """
    data = self._user_data
    if data is not None and name in data:
        del data[name]
def clear_user_attr(self, name: str):
    """ Clear user-defined attribute stored in ME instance.

    Silently does nothing when the attribute is absent.

    Args:
        name: user attribute name
    """
    data = self._user_data
    if data is not None and name in data:
        del data[name]
Python
def attr(self, number_or_name: Union[int, str]) -> Attr:
    """Find a ME attribute descriptor by number or name.

    Args:
        number_or_name: ME attribute number or name.
    Returns:
        `Attr` descriptor.
    """
    if isinstance(number_or_name, int):
        assert 0 <= number_or_name < len(self.attrs)
        return self.attrs[number_or_name]
    # Name lookup: translate the name into its positional index
    assert number_or_name in self._attr_names
    position = self._attr_names.index(number_or_name)
    return self.attrs[position]
def attr(self, number_or_name: Union[int, str]) -> Attr:
    """Find a ME attribute descriptor by number or name.

    Args:
        number_or_name: ME attribute number or name.
    Returns:
        `Attr` descriptor.
    """
    if isinstance(number_or_name, int):
        assert 0 <= number_or_name < len(self.attrs)
        return self.attrs[number_or_name]
    # Name lookup: translate the name into its positional index
    assert number_or_name in self._attr_names
    position = self._attr_names.index(number_or_name)
    return self.attrs[position]
Python
def attr_value(self, number_or_name: Union[int, str]) -> AttrValue:
    """Find a ME attribute by number or name and return its value.

    Falls back to the attribute's default when no explicit value is set.

    Args:
        number_or_name: ME attribute number or name.
    Returns:
        `Attr` value.
    """
    descriptor = self.attr(number_or_name)
    stored = self._attr_values[descriptor.number]
    return descriptor.default_value() if stored is None else stored
def attr_value(self, number_or_name: Union[int, str]) -> AttrValue:
    """Find a ME attribute by number or name and return its value.

    Falls back to the attribute's default when no explicit value is set.

    Args:
        number_or_name: ME attribute number or name.
    Returns:
        `Attr` value.
    """
    descriptor = self.attr(number_or_name)
    stored = self._attr_values[descriptor.number]
    return descriptor.default_value() if stored is None else stored
Python
def attr_is_set(self, number_or_name: Union[int, str]) -> bool:
    """Find a ME attribute by number or name and return True if the attr is set explicitly.

    Args:
        number_or_name: ME attribute number or name.
    """
    return self._attr_values[self.attr(number_or_name).number] is not None
def attr_is_set(self, number_or_name: Union[int, str]) -> bool:
    """Find a ME attribute by number or name and return True if the attr is set explicitly.

    Args:
        number_or_name: ME attribute number or name.
    """
    return self._attr_values[self.attr(number_or_name).number] is not None
Python
def attr_is_encoded(self, number_or_name: Union[int, str]) -> bool:
    """Find a ME attribute by number or name and return True if the attr has been encoded.

    The encoded flag is set by attr_encode().

    Args:
        number_or_name: ME attribute number or name.
    """
    a = self.attr(number_or_name)
    return self._attr_encoded[a.number]
def attr_is_encoded(self, number_or_name: Union[int, str]) -> bool:
    """Find a ME attribute by number or name and return True if the attr has been encoded.

    The encoded flag is set by attr_encode().

    Args:
        number_or_name: ME attribute number or name.
    """
    a = self.attr(number_or_name)
    return self._attr_encoded[a.number]
Python
def attr_encode(self, number_or_name: Union[int, str]) -> bytearray:
    """Encode the stored value of a ME attribute.

    Side effect: marks the attribute as encoded (see attr_is_encoded()).

    Args:
        number_or_name: ME attribute number or name.
    Returns:
        Encoded buffer produced by the attribute's data type for the stored
        value (which may be None when the attribute was never set).
    """
    a = self.attr(number_or_name)
    val = self._attr_values[a.number]
    buffer = a.data_type.encode(val)
    self._attr_encoded[a.number] = True
    return buffer
def attr_encode(self, number_or_name: Union[int, str]) -> bytearray:
    """Encode the stored value of a ME attribute.

    Side effect: marks the attribute as encoded (see attr_is_encoded()).

    Args:
        number_or_name: ME attribute number or name.
    Returns:
        Encoded buffer produced by the attribute's data type for the stored
        value (which may be None when the attribute was never set).
    """
    a = self.attr(number_or_name)
    val = self._attr_values[a.number]
    buffer = a.data_type.encode(val)
    self._attr_encoded[a.number] = True
    return buffer
Python
def merge(self, with_me: 'ME'):
    """Merge this ME with another ME by copying all attributes that are
    explicitly set in 'with_me' into self.

    Args:
        with_me: ME of the same type to copy assigned attributes from
    """
    if self is with_me:
        return
    assert type(self) is type(with_me)
    # Copy only explicitly-assigned attributes (attribute 0 is the instance id)
    for i in range(1, len(self.attrs)):
        if with_me.attr_is_set(i):
            self._attr_values[i] = with_me._attr_values[i]
    if with_me.user_name is not None:
        self.user_name = with_me.user_name
    if with_me._user_data is not None:
        # BUG FIX: the previous 'a and b or c' idiom fell through to
        # with_me._user_data (aliasing it) whenever self._user_data was an
        # empty dict; use explicit branching instead
        if self._user_data is None:
            self._user_data = with_me._user_data
        else:
            self._user_data = {**self._user_data, **with_me._user_data}
def merge(self, with_me: 'ME'):
    """Merge this ME with another ME by copying all attributes that are
    explicitly set in 'with_me' into self.

    Args:
        with_me: ME of the same type to copy assigned attributes from
    """
    if self is with_me:
        return
    assert type(self) is type(with_me)
    # Copy only explicitly-assigned attributes (attribute 0 is the instance id)
    for i in range(1, len(self.attrs)):
        if with_me.attr_is_set(i):
            self._attr_values[i] = with_me._attr_values[i]
    if with_me.user_name is not None:
        self.user_name = with_me.user_name
    if with_me._user_data is not None:
        # BUG FIX: the previous 'a and b or c' idiom fell through to
        # with_me._user_data (aliasing it) whenever self._user_data was an
        # empty dict; use explicit branching instead
        if self._user_data is None:
            self._user_data = with_me._user_data
        else:
            self._user_data = {**self._user_data, **with_me._user_data}
Python
def attr_names(self, access: str = '', assigned_only: BoolDatum = False) -> tuple:
    """Return a list of all attribute names, optionally restricted to those
    with a specified access level.

    Args:
        access: Desired access level, or ``None`` to return all attributes.
        assigned_only: True = return only attributes with explicitly assigned values
    Returns:
        (`name1, name2, name3`).
    Note:
        Attribute 0 is ``me_inst`` (the ME instance number) and is not returned.
    """
    selected = []
    for a in self.attrs:
        if a.number == 0:
            continue  # skip the instance-id pseudo-attribute
        if access not in a.access:
            continue
        if assigned_only and self._attr_values[a.number] is None:
            continue
        selected.append(a.name)
    return tuple(selected)
def attr_names(self, access: str = '', assigned_only: BoolDatum = False) -> tuple:
    """Return a list of all attribute names, optionally restricted to those
    with a specified access level.

    Args:
        access: Desired access level, or ``None`` to return all attributes.
        assigned_only: True = return only attributes with explicitly assigned values
    Returns:
        (`name1, name2, name3`).
    Note:
        Attribute 0 is ``me_inst`` (the ME instance number) and is not returned.
    """
    selected = []
    for a in self.attrs:
        if a.number == 0:
            continue  # skip the instance-id pseudo-attribute
        if access not in a.access:
            continue
        if assigned_only and self._attr_values[a.number] is None:
            continue
        selected.append(a.name)
    return tuple(selected)
Python
def attr_numbers(self, access: str = '', assigned_only: BoolDatum = False) -> tuple:
    """Return a list of all attribute numbers, optionally restricted to those
    with a specified access level.

    Args:
        access: Desired access level, or ``None`` to return all attributes.
        assigned_only: True = return only attributes with explicitly assigned values
    Returns:
        Tuple in the form of (number1, number2, ..).
    Note:
        Attribute 0 is ``me_inst`` (the ME instance number) and is not returned.
    """
    selected = []
    for a in self.attrs:
        if a.number == 0:
            continue  # skip the instance-id pseudo-attribute
        if access not in a.access:
            continue
        if assigned_only and self._attr_values[a.number] is None:
            continue
        selected.append(a.number)
    return tuple(selected)
def attr_numbers(self, access: str = '', assigned_only: BoolDatum = False) -> tuple:
    """Return a list of all attribute numbers, optionally restricted to those
    with a specified access level.

    Args:
        access: Desired access level, or ``None`` to return all attributes.
        assigned_only: True = return only attributes with explicitly assigned values
    Returns:
        Tuple in the form of (number1, number2, ..).
    Note:
        Attribute 0 is ``me_inst`` (the ME instance number) and is not returned.
    """
    selected = []
    for a in self.attrs:
        if a.number == 0:
            continue  # skip the instance-id pseudo-attribute
        if access not in a.access:
            continue
        if assigned_only and self._attr_values[a.number] is None:
            continue
        selected.append(a.number)
    return tuple(selected)
Python
def attr_mask(self, access: str = '', assigned_only: BoolDatum = False) -> int:
    """Return a mask for all attributes, optionally restricted to those with
    a specified access level.

    Args:
        access: Desired access level, or ``None`` to return all attributes.
        assigned_only: True = return only attributes with explicitly assigned values
    Returns:
        attribute bitmask
    """
    result = 0
    # OR together the mask bit of every matching attribute
    for number in self.attr_numbers(access, assigned_only):
        result |= self.attr(number).mask
    return result
def attr_mask(self, access: str = '', assigned_only: BoolDatum = False) -> int:
    """Return a mask for all attributes, optionally restricted to those with
    a specified access level.

    Args:
        access: Desired access level, or ``None`` to return all attributes.
        assigned_only: True = return only attributes with explicitly assigned values
    Returns:
        attribute bitmask
    """
    result = 0
    # OR together the mask bit of every matching attribute
    for number in self.attr_numbers(access, assigned_only):
        result |= self.attr(number).mask
    return result
Python
def OnuAdd(self, onu_name: OnuName, onu_id: OnuSbiId, tci: int = 0) -> OnuDriver:
    """ Add ONU to the OLT database.

    Args:
        onu_name: ONU Name
        onu_id: ONU Id
        tci: initial TCI value (mainly for debugging)
    Returns:
        ONU driver or None if already exists
    """
    with self._lock:
        if onu_id in self._onus:
            logger.error("OnuAdd: ONU {} is already in the database".format(onu_id))
            return None
        driver = OnuDriver(onu_name, onu_id, self, tci)
        # Index the new driver both by SBI id and by name
        self._onus[onu_id] = driver
        self._onus_by_name[onu_name] = driver
        return driver
def OnuAdd(self, onu_name: OnuName, onu_id: OnuSbiId, tci: int = 0) -> OnuDriver:
    """ Add ONU to the OLT database.

    Args:
        onu_name: ONU Name
        onu_id: ONU Id
        tci: initial TCI value (mainly for debugging)
    Returns:
        ONU driver or None if already exists
    """
    with self._lock:
        if onu_id in self._onus:
            logger.error("OnuAdd: ONU {} is already in the database".format(onu_id))
            return None
        driver = OnuDriver(onu_name, onu_id, self, tci)
        # Index the new driver both by SBI id and by name
        self._onus[onu_id] = driver
        self._onus_by_name[onu_name] = driver
        return driver
Python
def OnuAddUpdate(self, onu_name: OnuName, onu_id: OnuSbiId, tci: int = 0) -> OnuDriver:
    """ Add ONU if doesn't exist or update onu_id if it is already in the database.

    Args:
        onu_name: ONU Name
        onu_id: ONU SBI Id
        tci: initial TCI value (mainly for debugging)
    Returns:
        ONU driver or None in case of error
    """
    with self._lock:
        if onu_name not in self._onus_by_name:
            # NOTE(review): OnuAdd() acquires self._lock again while we hold
            # it here - works only if _lock is an RLock; confirm
            return self.OnuAdd(onu_name, onu_id, tci)
        # ONU is already in the database. Re-assign onu_id if it changed
        onu = self._onus_by_name[onu_name]
        if onu.onu_id != onu_id:
            if onu_id in self._onus:
                # BUG FIX: corrected typo "ialready" in the error message
                logger.error("OnuAddUpdate: Can't assign SBI ID {} to ONU {}. It is already assigned to ONU {}".\
                    format(onu_id, onu_name, self._onus[onu_id].onu_name))
                return None
            # Re-key the SBI-id index to the new id
            del self._onus[onu.onu_id]
            onu.set_onu_id(onu_id)
            self._onus[onu_id] = onu
        return onu
def OnuAddUpdate(self, onu_name: OnuName, onu_id: OnuSbiId, tci: int = 0) -> OnuDriver:
    """ Add ONU if doesn't exist or update onu_id if it is already in the database.

    Args:
        onu_name: ONU Name
        onu_id: ONU SBI Id
        tci: initial TCI value (mainly for debugging)
    Returns:
        ONU driver or None in case of error
    """
    with self._lock:
        if onu_name not in self._onus_by_name:
            # NOTE(review): OnuAdd() acquires self._lock again while we hold
            # it here - works only if _lock is an RLock; confirm
            return self.OnuAdd(onu_name, onu_id, tci)
        # ONU is already in the database. Re-assign onu_id if it changed
        onu = self._onus_by_name[onu_name]
        if onu.onu_id != onu_id:
            if onu_id in self._onus:
                # BUG FIX: corrected typo "ialready" in the error message
                logger.error("OnuAddUpdate: Can't assign SBI ID {} to ONU {}. It is already assigned to ONU {}".\
                    format(onu_id, onu_name, self._onus[onu_id].onu_name))
                return None
            # Re-key the SBI-id index to the new id
            del self._onus[onu.onu_id]
            onu.set_onu_id(onu_id)
            self._onus[onu_id] = onu
        return onu
Python
def OnuDelete(self, onu_name: OnuName):
    """ Delete ONU from the database.

    Logs an error and returns silently when the ONU is unknown.

    Args:
        onu_name: ONU Name
    """
    with self._lock:
        if onu_name not in self._onus_by_name:
            # BUG FIX: the message used the '%' operator against a '{}'
            # format string, which raises TypeError at runtime; use format()
            logger.error("OnuDelete: ONU {} is not in the database".format(onu_name))
            return
        onu = self._onus_by_name[onu_name]
        # Remove from both indexes (by name and by SBI id)
        del self._onus_by_name[onu_name]
        del self._onus[onu.onu_id]
def OnuDelete(self, onu_name: OnuName):
    """ Delete ONU from the database.

    Logs an error and returns silently when the ONU is unknown.

    Args:
        onu_name: ONU Name
    """
    with self._lock:
        if onu_name not in self._onus_by_name:
            # BUG FIX: the message used the '%' operator against a '{}'
            # format string, which raises TypeError at runtime; use format()
            logger.error("OnuDelete: ONU {} is not in the database".format(onu_name))
            return
        onu = self._onus_by_name[onu_name]
        # Remove from both indexes (by name and by SBI id)
        del self._onus_by_name[onu_name]
        del self._onus[onu.onu_id]
Python
def send(self, onu: OnuDriver, msg: RawMessage) -> bool:
    """ Send raw OMCI message to ONU.

    Args:
        onu: ONU object
        msg: raw OMCI message without CRC/MIC
    Returns:
        True when the message was handed to the channel
    """
    with self._lock:
        if self._channel:
            return self._channel.send(self.olt_id, onu.onu_id, msg)
        logger.error("send: can't send message to ONU {}. No active channel".format(onu.onu_id))
        return False
def send(self, onu: OnuDriver, msg: RawMessage) -> bool:
    """ Send raw OMCI message to ONU.

    Args:
        onu: ONU object
        msg: raw OMCI message without CRC/MIC
    Returns:
        True when the message was handed to the channel
    """
    with self._lock:
        if self._channel:
            return self._channel.send(self.olt_id, onu.onu_id, msg)
        logger.error("send: can't send message to ONU {}. No active channel".format(onu.onu_id))
        return False
Python
def recv(self, onu_id: OnuSbiId, msg: RawMessage):
    """ Receive an OMCI message.

    This function is called by an OmciChannel service when a new OMCI message
    is received from pOLT.<br>
    The function forwards the message to the relevant OnuDriver.

    Args:
        onu_id: ONU Id
        msg: raw OMCI message without CRC/MIC
    """
    with self._lock:
        target = self._onus.get(onu_id)
        if target is None:
            logger.error("Received packet for an unknown ONU {}".format(onu_id))
            return
        target.recv(msg)
def recv(self, onu_id: OnuSbiId, msg: RawMessage):
    """ Receive an OMCI message.

    This function is called by an OmciChannel service when a new OMCI message
    is received from pOLT.<br>
    The function forwards the message to the relevant OnuDriver.

    Args:
        onu_id: ONU Id
        msg: raw OMCI message without CRC/MIC
    """
    with self._lock:
        target = self._onus.get(onu_id)
        if target is None:
            logger.error("Received packet for an unknown ONU {}".format(onu_id))
            return
        target.recv(msg)
Python
def OltAddUpdate(cls, olt_id: PoltId, channel: 'OltCommChannel') -> Olt:
    """ Add OLT to the database or update an existing OLT.

    Args:
        olt_id: OLT Id
        channel: communication channel to pOLT
    Returns:
        Olt object instance
    """
    with cls._lock:
        existing = cls._olts.get(olt_id)
        if existing is not None:
            # Known OLT - just refresh its channel
            existing.set_channel(channel)
            return existing
        created = Olt(olt_id, channel)
        cls._olts[olt_id] = created
        return created
def OltAddUpdate(cls, olt_id: PoltId, channel: 'OltCommChannel') -> Olt:
    """ Add OLT to the database or update an existing OLT.

    Args:
        olt_id: OLT Id
        channel: communication channel to pOLT
    Returns:
        Olt object instance
    """
    with cls._lock:
        existing = cls._olts.get(olt_id)
        if existing is not None:
            # Known OLT - just refresh its channel
            existing.set_channel(channel)
            return existing
        created = Olt(olt_id, channel)
        cls._olts[olt_id] = created
        return created
Python
def OltDelete(cls, olt_id: PoltId):
    """ Delete OLT from the database.

    Logs an error and returns silently when the OLT is unknown.

    Args:
        olt_id: OLT Id
    """
    with cls._lock:
        if olt_id not in cls._olts:
            # BUG FIX: the message used the '%' operator against a '{}'
            # format string, which raises TypeError at runtime; use format()
            logger.error("OltDelete: OLT {} is not in the database".format(olt_id))
            return
        del cls._olts[olt_id]
def OltDelete(cls, olt_id: PoltId):
    """ Delete OLT from the database.

    Logs an error and returns silently when the OLT is unknown.

    Args:
        olt_id: OLT Id
    """
    with cls._lock:
        if olt_id not in cls._olts:
            # BUG FIX: the message used the '%' operator against a '{}'
            # format string, which raises TypeError at runtime; use format()
            logger.error("OltDelete: OLT {} is not in the database".format(olt_id))
            return
        del cls._olts[olt_id]
Python
def OltGetFirst(cls) -> Olt:
    """Get first OLT in the database.

    Returns:
        Olt if successful, None if there are no Olts in the database
    """
    with cls._lock:
        # next() with a default avoids materializing the whole value list
        return next(iter(cls._olts.values()), None)
def OltGetFirst(cls) -> Olt:
    """Get first OLT in the database.

    Returns:
        Olt if successful, None if there are no Olts in the database
    """
    with cls._lock:
        # next() with a default avoids materializing the whole value list
        return next(iter(cls._olts.values()), None)
Python
def commandline():
    """
    Configures the Argument Parser and returns it.

    Note: the parser is returned unparsed - callers invoke parse_args()
    themselves (the old docstring incorrectly claimed parsed args were
    returned).
    """
    description = 'Cool Command-line Cheatsheets'
    help_general = 'The cheatsheet you want to see'
    help_list = 'List all available cheatsheets'
    help_colors = 'Print output without colors'
    help_inline = 'One cheat per line, this is the default'
    help_breakline = 'Break lines'

    argumentparser = ArgumentParser(description=description)
    # -l and -b are mutually exclusive printer selectors
    printertype = argumentparser.add_mutually_exclusive_group()

    argumentparser.add_argument('--list', dest='listcheats', action="store_true", required=False, help=help_list)
    argumentparser.add_argument('--nc', dest='nocolor', action="store_false", required=False, help=help_colors)
    argumentparser.add_argument('cheatsheet', nargs='?', help=help_general)

    printertype.set_defaults(printer='InlinePrinter')
    printertype.add_argument('-l', help=help_inline, action='store_const', dest='printer', const='InlinePrinter')
    printertype.add_argument('-b', help=help_breakline, action='store_const', dest='printer', const='BreaklinePrinter')

    return argumentparser
def commandline():
    """
    Configures the Argument Parser and returns it.

    Note: the parser is returned unparsed - callers invoke parse_args()
    themselves (the old docstring incorrectly claimed parsed args were
    returned).
    """
    description = 'Cool Command-line Cheatsheets'
    help_general = 'The cheatsheet you want to see'
    help_list = 'List all available cheatsheets'
    help_colors = 'Print output without colors'
    help_inline = 'One cheat per line, this is the default'
    help_breakline = 'Break lines'

    argumentparser = ArgumentParser(description=description)
    # -l and -b are mutually exclusive printer selectors
    printertype = argumentparser.add_mutually_exclusive_group()

    argumentparser.add_argument('--list', dest='listcheats', action="store_true", required=False, help=help_list)
    argumentparser.add_argument('--nc', dest='nocolor', action="store_false", required=False, help=help_colors)
    argumentparser.add_argument('cheatsheet', nargs='?', help=help_general)

    printertype.set_defaults(printer='InlinePrinter')
    printertype.add_argument('-l', help=help_inline, action='store_const', dest='printer', const='InlinePrinter')
    printertype.add_argument('-b', help=help_breakline, action='store_const', dest='printer', const='BreaklinePrinter')

    return argumentparser
Python
def add_color(self, string):
    """
    Adds color to the console output: `<param>` placeholders get the PARAM
    color, the rest of the line the DEFAULT color.

    :param string: The string to add color to.
    """
    base = self.colors.DEFAULT
    param = self.colors.PARAM
    reset = self.colors.RESET
    highlighted = string.replace('<', param + '<').replace('>', '>' + reset)
    return base + highlighted + reset
def add_color(self, string):
    """
    Adds color to the console output: `<param>` placeholders get the PARAM
    color, the rest of the line the DEFAULT color.

    :param string: The string to add color to.
    """
    base = self.colors.DEFAULT
    param = self.colors.PARAM
    reset = self.colors.RESET
    highlighted = string.replace('<', param + '<').replace('>', '>' + reset)
    return base + highlighted + reset
Python
def printcheats(self, template):
    """
    Loops over the entries in the ConfigParser and prints them with a
    specific template.

    :param template: Template to use with the format() function.
    """
    sections = self.configparser.sections()
    sections.remove('main')  # 'main' holds sheet metadata, not cheats
    for section in sections:
        print(section.upper())
        for key in self.configparser[section]:
            raw = self.configparser[section][key]
            shown = self.add_color(raw) if self.print_colored else raw
            print(template.format(key.capitalize(), shown))
def printcheats(self, template):
    """
    Loops over the entries in the ConfigParser and prints them with a
    specific template.

    :param template: Template to use with the format() function.
    """
    sections = self.configparser.sections()
    sections.remove('main')  # 'main' holds sheet metadata, not cheats
    for section in sections:
        print(section.upper())
        for key in self.configparser[section]:
            raw = self.configparser[section][key]
            shown = self.add_color(raw) if self.print_colored else raw
            print(template.format(key.capitalize(), shown))
Python
def width(self):
    """ Width of the longest ConfigParser entry, as a string (minimum 1). """
    longest = 1
    for section in self.configparser.sections():
        section_max = len(max(self.configparser[section], key=len))
        longest = max(longest, section_max)
    return str(longest)
def width(self):
    """ Width of the longest ConfigParser entry, as a string (minimum 1). """
    longest = 1
    for section in self.configparser.sections():
        section_max = len(max(self.configparser[section], key=len))
        longest = max(longest, section_max)
    return str(longest)
Python
def printsheet(self):
    """
    Builds the inline template (description column padded to the widest
    entry) and delegates to Printer.printcheats().
    """
    template = "{0:<" + self.width + "} {1}"
    super().printcheats(template)
def printsheet(self):
    """
    Builds the inline template (description column padded to the widest
    entry) and delegates to Printer.printcheats().
    """
    template = "{0:<" + self.width + "} {1}"
    super().printcheats(template)
Python
def printsheet(self):
    """
    Builds the line-breaking template (value on its own indented line) and
    delegates to Printer.printcheats().
    """
    template = "{0} \n {1}"
    super().printcheats(template)
def printsheet(self):
    """
    Builds the line-breaking template (value on its own indented line) and
    delegates to Printer.printcheats().
    """
    template = "{0} \n {1}"
    super().printcheats(template)
Python
def create_printer(name):
    """
    Returns a specific Printer Object.

    :param name: Printer Object to return.
    """
    registry = PrinterFactory.printer_classes
    return registry[name]
def create_printer(name):
    """
    Returns a specific Printer Object.

    :param name: Printer Object to return.
    """
    registry = PrinterFactory.printer_classes
    return registry[name]
Python
def print_available_sheets(directory):
    """
    Prints all available cheatsheets in the sheet folder to the stdout.

    :param directory: The directory where the cheatsheets are located.
    """
    parser = ConfigParser()
    for filename in sorted(listdir(directory)):
        try:
            parser.read(path.join(directory, filename))
            print('{0}'.format(parser['main']['name']))
        except ConfigParserError:
            # TODO: What to do here? Unparsable files are silently skipped.
            pass
def print_available_sheets(directory):
    """
    Prints all available cheatsheets in the sheet folder to the stdout.

    :param directory: The directory where the cheatsheets are located.
    """
    parser = ConfigParser()
    for filename in sorted(listdir(directory)):
        try:
            parser.read(path.join(directory, filename))
            print('{0}'.format(parser['main']['name']))
        except ConfigParserError:
            # TODO: What to do here? Unparsable files are silently skipped.
            pass
Python
async def avatar_(self, ctx: commands.context, member: discord.Member=None):
    """View your's or someone's avatar"""
    # Default to the command invoker when no member is given
    target = member or ctx.author
    embed = discord.Embed(color=target.color)
    embed.set_image(url=target.avatar.url)
    embed.set_author(name=f"{target.name}'s avatar!", icon_url=target.avatar.url)
    embed.set_footer(text=f"Requested by {ctx.author.name}", icon_url=ctx.author.avatar.url)
    await ctx.send(embed=embed)
async def avatar_(self, ctx: commands.context, member: discord.Member=None):
    """View your's or someone's avatar"""
    # Default to the command invoker when no member is given
    target = member or ctx.author
    embed = discord.Embed(color=target.color)
    embed.set_image(url=target.avatar.url)
    embed.set_author(name=f"{target.name}'s avatar!", icon_url=target.avatar.url)
    embed.set_footer(text=f"Requested by {ctx.author.name}", icon_url=ctx.author.avatar.url)
    await ctx.send(embed=embed)
Python
async def emojiadd(self, ctx: commands.Context, emoji: str, *, name: str):
    """Creates an emoji in the server using a url"""
    # Download the image/GIF from the supplied URL and upload it as a
    # custom guild emoji
    async with aiohttp.ClientSession() as session:
        async with session.get(emoji) as r:
            try:
                imgOrGIF = BytesIO(await r.read())
                bValue = imgOrGIF.getvalue()
                # NOTE(review): range(200, 299) excludes status 299 - confirm
                # the intended check wasn't 200..299 inclusive
                if r.status in range(200, 299):
                    emojiCreate = await ctx.guild.create_custom_emoji(image=bValue, name=name)
                    await ctx.send(embed=discord.Embed(description=f"**<:tick:897382645321850920> Successfully created emoji - {emojiCreate} with name: `{name}`**", color=discord.Color.green()))
                else:
                    await ctx.send(embed=discord.Embed(description=f"<:error:897382665781669908> An error occured while creating the emoji | {r.status}", color=discord.Color.red()))
            except discord.HTTPException:
                # Discord rejects payloads above the emoji size limit
                await ctx.send(embed=discord.Embed(description=f"<:error:897382665781669908> The file size is too big!", color=discord.Color.red()))
            except Exception as e:
                # Deliberate best-effort: log unexpected errors instead of
                # crashing the command
                print(e)
async def emojiadd(self, ctx: commands.Context, emoji: str, *, name: str): """Creates an emoji in the server using a url""" async with aiohttp.ClientSession() as session: async with session.get(emoji) as r: try: imgOrGIF = BytesIO(await r.read()) bValue = imgOrGIF.getvalue() if r.status in range(200, 299): emojiCreate = await ctx.guild.create_custom_emoji(image=bValue, name=name) await ctx.send(embed=discord.Embed(description=f"**<:tick:897382645321850920> Successfully created emoji - {emojiCreate} with name: `{name}`**", color=discord.Color.green())) else: await ctx.send(embed=discord.Embed(description=f"<:error:897382665781669908> An error occured while creating the emoji | {r.status}", color=discord.Color.red())) except discord.HTTPException: await ctx.send(embed=discord.Embed(description=f"<:error:897382665781669908> The file size is too big!", color=discord.Color.red())) except Exception as e: print(e)
Python
async def stealemoji(self, ctx: commands.Context, emoji: Union[discord.Emoji, discord.PartialEmoji], *, name: str): """Steal an emoji for another server.... The bot adds the emoji to this server""" try: emoji_bytes = await emoji.read() emoji_create = await ctx.guild.create_custom_emoji(image=emoji_bytes, name=name) await ctx.send(embed=discord.Embed(description=f"**<:tick:897382645321850920> Successfully created emoji - {emoji_create} with name: `{name}`**", color=discord.Color.green())) except Exception as e: error = str(e).capitalize() return await ctx.send(embed=discord.Embed(description=f"**<:error:897382665781669908> An error occurred while creating the emoji\n`{error}`**", color=discord.Color.red()))
async def stealemoji(self, ctx: commands.Context, emoji: Union[discord.Emoji, discord.PartialEmoji], *, name: str): """Steal an emoji for another server.... The bot adds the emoji to this server""" try: emoji_bytes = await emoji.read() emoji_create = await ctx.guild.create_custom_emoji(image=emoji_bytes, name=name) await ctx.send(embed=discord.Embed(description=f"**<:tick:897382645321850920> Successfully created emoji - {emoji_create} with name: `{name}`**", color=discord.Color.green())) except Exception as e: error = str(e).capitalize() return await ctx.send(embed=discord.Embed(description=f"**<:error:897382665781669908> An error occurred while creating the emoji\n`{error}`**", color=discord.Color.red()))
Python
async def whois(self, ctx: commands.Context, user: Union[discord.Member, discord.User]=None): """Get information about a user or yourself""" user = user or ctx.author accType = "Bot" if user.bot else "Human" badge_emojis = { "bug_hunter": str(self.bot.get_emoji(928298721916112916)), "bug_hunter_level_2": str(self.bot.get_emoji(928298721303736361)), "discord_certified_moderator": str(self.bot.get_emoji(928298721475698708)), "early_supporter": str(self.bot.get_emoji(928298721496686692)), "verified_bot_developer": str(self.bot.get_emoji(928299192428953660)), "hypesquad": str(self.bot.get_emoji(930418236678340668)), "hypesquad_balance": str(self.bot.get_emoji(928299452446412821)), "hypesquad_bravery": str(self.bot.get_emoji(928299808974843984)), "hypesquad_brilliance": str(self.bot.get_emoji(928299672840327208)), "partner": str(self.bot.get_emoji(928502472891330622)), "staff": str(self.bot.get_emoji(928502668224262195)) } def get_badges(user: Union[discord.User, discord.Member]): badges = [] for badge, value in iter(user.public_flags): if value and badge in badge_emojis.keys(): badges.append(badge_emojis[badge]) return badges if not user in ctx.guild.members: em = discord.Embed( description=f"""**• Username: `{user}` • UserID: `{user.id}` • Account Type: `{accType}` • Created at: {discord.utils.format_dt(user.created_at)} • Badges: {" ".join(get_badges(user)) if len(get_badges(user)) > 0 else "`-`"}**""", color=discord.Color.green() ).set_author(name=user.name, icon_url=user.avatar.url).set_thumbnail(url=user.avatar.url).set_footer(text="Note: This user is not from this server", icon_url=user.avatar.url) user_for_banner = await self.bot.fetch_user(user.id) if user_for_banner.banner: em.set_image(url=user_for_banner.banner.url) return await ctx.send(embed=em) member: discord.Member = ctx.guild.get_member(user.id) def timedOut(member: discord.Member): """Gets a string type of `member.timed_out` rather than a boolean type""" if member.timed_out: return "Yes" else: return 
"No" def getRoles(member: discord.Member): """Gets the user roles""" if len(list(member.roles)) == 0: return "-" else: sorted_roles = sorted( [role for role in member.roles[1:]], key=lambda x: x.position, reverse=True ) roles = " ".join(role.mention for role in sorted_roles) return roles nick = user.nick if user.nick else "-" embed = discord.Embed( description=f"""**• Username: `{user}` • UserID: `{user.id}` • Nickname: `{nick}` • Account Type: `{accType}` • Created at: {discord.utils.format_dt(user.created_at)} • Joined at: {discord.utils.format_dt(member.joined_at)} • Timed Out: `{timedOut(member)}` • Roles: {getRoles(member)} • Badges: {" ".join(get_badges(user)) if len(get_badges(user)) > 0 else "`-`"}**""", color=user.color ).set_author(name=user.name, icon_url=user.avatar.url).set_thumbnail(url=user.avatar.url) userForBanner = await self.bot.fetch_user(user.id) if userForBanner.banner: embed.set_image(url=userForBanner.banner.url) return await ctx.send(embed=embed)
async def whois(self, ctx: commands.Context, user: Union[discord.Member, discord.User]=None): """Get information about a user or yourself""" user = user or ctx.author accType = "Bot" if user.bot else "Human" badge_emojis = { "bug_hunter": str(self.bot.get_emoji(928298721916112916)), "bug_hunter_level_2": str(self.bot.get_emoji(928298721303736361)), "discord_certified_moderator": str(self.bot.get_emoji(928298721475698708)), "early_supporter": str(self.bot.get_emoji(928298721496686692)), "verified_bot_developer": str(self.bot.get_emoji(928299192428953660)), "hypesquad": str(self.bot.get_emoji(930418236678340668)), "hypesquad_balance": str(self.bot.get_emoji(928299452446412821)), "hypesquad_bravery": str(self.bot.get_emoji(928299808974843984)), "hypesquad_brilliance": str(self.bot.get_emoji(928299672840327208)), "partner": str(self.bot.get_emoji(928502472891330622)), "staff": str(self.bot.get_emoji(928502668224262195)) } def get_badges(user: Union[discord.User, discord.Member]): badges = [] for badge, value in iter(user.public_flags): if value and badge in badge_emojis.keys(): badges.append(badge_emojis[badge]) return badges if not user in ctx.guild.members: em = discord.Embed( description=f"""**• Username: `{user}` • UserID: `{user.id}` • Account Type: `{accType}` • Created at: {discord.utils.format_dt(user.created_at)} • Badges: {" ".join(get_badges(user)) if len(get_badges(user)) > 0 else "`-`"}**""", color=discord.Color.green() ).set_author(name=user.name, icon_url=user.avatar.url).set_thumbnail(url=user.avatar.url).set_footer(text="Note: This user is not from this server", icon_url=user.avatar.url) user_for_banner = await self.bot.fetch_user(user.id) if user_for_banner.banner: em.set_image(url=user_for_banner.banner.url) return await ctx.send(embed=em) member: discord.Member = ctx.guild.get_member(user.id) def timedOut(member: discord.Member): """Gets a string type of `member.timed_out` rather than a boolean type""" if member.timed_out: return "Yes" else: return 
"No" def getRoles(member: discord.Member): """Gets the user roles""" if len(list(member.roles)) == 0: return "-" else: sorted_roles = sorted( [role for role in member.roles[1:]], key=lambda x: x.position, reverse=True ) roles = " ".join(role.mention for role in sorted_roles) return roles nick = user.nick if user.nick else "-" embed = discord.Embed( description=f"""**• Username: `{user}` • UserID: `{user.id}` • Nickname: `{nick}` • Account Type: `{accType}` • Created at: {discord.utils.format_dt(user.created_at)} • Joined at: {discord.utils.format_dt(member.joined_at)} • Timed Out: `{timedOut(member)}` • Roles: {getRoles(member)} • Badges: {" ".join(get_badges(user)) if len(get_badges(user)) > 0 else "`-`"}**""", color=user.color ).set_author(name=user.name, icon_url=user.avatar.url).set_thumbnail(url=user.avatar.url) userForBanner = await self.bot.fetch_user(user.id) if userForBanner.banner: embed.set_image(url=userForBanner.banner.url) return await ctx.send(embed=embed)
Python
def timedOut(member: discord.Member): """Gets a string type of `member.timed_out` rather than a boolean type""" if member.timed_out: return "Yes" else: return "No"
def timedOut(member: discord.Member): """Gets a string type of `member.timed_out` rather than a boolean type""" if member.timed_out: return "Yes" else: return "No"
Python
async def nuke_(self, ctx, *, channel: discord.TextChannel=None): """Delete all messages in a channel\nExample: `nuke [channel]\nIf channel is None then it will nuke the channel the command is used in`""" channel = channel if channel else ctx.channel if not ctx.author.guild_permissions.manage_channels: em = discord.Embed(description="<:error:897382665781669908> You can't do that!", color=discord.Color.red()) return await ctx.send(embed=em) embed1 = discord.Embed(description=f"Are you sure you want to **NUKE** {channel.mention}?\n------------------------------------------------\nRespond Within **15** seconds!", color=discord.Color.orange()) message = await ctx.send(embed=embed1) await message.edit(embed=embed1, view=NukeView(ctx, channel, message))
async def nuke_(self, ctx, *, channel: discord.TextChannel=None): """Delete all messages in a channel\nExample: `nuke [channel]\nIf channel is None then it will nuke the channel the command is used in`""" channel = channel if channel else ctx.channel if not ctx.author.guild_permissions.manage_channels: em = discord.Embed(description="<:error:897382665781669908> You can't do that!", color=discord.Color.red()) return await ctx.send(embed=em) embed1 = discord.Embed(description=f"Are you sure you want to **NUKE** {channel.mention}?\n------------------------------------------------\nRespond Within **15** seconds!", color=discord.Color.orange()) message = await ctx.send(embed=embed1) await message.edit(embed=embed1, view=NukeView(ctx, channel, message))
Python
async def user(self, ctx: commands.Context, user: discord.Member, amount): """Delete message of a user in the channel""" def is_user(m): """Checks the user's messages in the channel""" return m.author == user channel: discord.TextChannel = ctx.channel deleted = await channel.purge(limit=amount, check=is_user) embed = discord.Embed(description=f"**<:tick:897382645321850920> Deleted {len(deleted)} messages of {user.mention}**", color=discord.Color.green()) await ctx.send(embed=embed)
async def user(self, ctx: commands.Context, user: discord.Member, amount): """Delete message of a user in the channel""" def is_user(m): """Checks the user's messages in the channel""" return m.author == user channel: discord.TextChannel = ctx.channel deleted = await channel.purge(limit=amount, check=is_user) embed = discord.Embed(description=f"**<:tick:897382645321850920> Deleted {len(deleted)} messages of {user.mention}**", color=discord.Color.green()) await ctx.send(embed=embed)
Python
async def timeout(self, ctx: commands.Context, user: discord.Member, time, *, reason: str = "No reason provided"): """Timeout/Mute a user in the server""" if user == ctx.author: return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> You can't timeout yourself!**", color=discord.Color.red())) try: timeConvert = humanfriendly.parse_timespan(time) await user.timeout(discord.utils.utcnow()+datetime.timedelta(seconds=timeConvert), reason=reason) embed = discord.Embed(description=f"**<:tick:897382645321850920> Successfully muted {user.mention} for {time} | Reason: {reason}**", color=discord.Color.green()) await ctx.send(embed=embed) await user.send(embed=discord.Embed(description=f"**<:error:897382665781669908> You were muted in {ctx.guild.name} | Reason: {reason}**", color=discord.Color.red())) except discord.Forbidden: return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> This user has a higher or equal role to me. **", color=discord.Color.red()))
async def timeout(self, ctx: commands.Context, user: discord.Member, time, *, reason: str = "No reason provided"): """Timeout/Mute a user in the server""" if user == ctx.author: return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> You can't timeout yourself!**", color=discord.Color.red())) try: timeConvert = humanfriendly.parse_timespan(time) await user.timeout(discord.utils.utcnow()+datetime.timedelta(seconds=timeConvert), reason=reason) embed = discord.Embed(description=f"**<:tick:897382645321850920> Successfully muted {user.mention} for {time} | Reason: {reason}**", color=discord.Color.green()) await ctx.send(embed=embed) await user.send(embed=discord.Embed(description=f"**<:error:897382665781669908> You were muted in {ctx.guild.name} | Reason: {reason}**", color=discord.Color.red())) except discord.Forbidden: return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> This user has a higher or equal role to me. **", color=discord.Color.red()))
Python
async def unmute(self, ctx: commands.Context, user: discord.Member, *, reason: str = "No reason provided"): """Unmutes a user from the server""" if user == ctx.author: return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> You can't unmute yourself!**", color=discord.Color.red())) if not user.timed_out: return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> That user isn't muted!**", color=discord.Color.red())) await user.timeout(None, reason=reason) embed = discord.Embed(description=f"**<:tick:897382645321850920> Successfully unmuted {user.mention} | Reason: {reason}**", color=discord.Color.green()) await ctx.send(embed=embed)
async def unmute(self, ctx: commands.Context, user: discord.Member, *, reason: str = "No reason provided"): """Unmutes a user from the server""" if user == ctx.author: return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> You can't unmute yourself!**", color=discord.Color.red())) if not user.timed_out: return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> That user isn't muted!**", color=discord.Color.red())) await user.timeout(None, reason=reason) embed = discord.Embed(description=f"**<:tick:897382645321850920> Successfully unmuted {user.mention} | Reason: {reason}**", color=discord.Color.green()) await ctx.send(embed=embed)
Python
async def create_(self, ctx: commands.Context, channel: discord.TextChannel, *, name = None): """Creates a panel in a channel through which users can interact and open tickets""" if not channel: embed = discord.Embed( description="**<:error:897382665781669908> Please enter a channel to make the panel in!**", color=discord.Color.red() ) return await ctx.send(embed=embed) if not name: embed = discord.Embed( description="**<:error:897382665781669908> Please enter a name!**", color=discord.Color.red() ) return await ctx.send(embed=embed) if not ctx.author.guild_permissions.manage_channels: embed = discord.Embed( description="**<:error:897382665781669908> You can't do that!**", color=discord.Color.red() ) return await ctx.send(embed=embed) if channel == ctx.channel: panel = discord.Embed( title=name, description="To create a ticket react with 📩", color=discord.Color.green(), ) panel.set_footer(text=f"{self.bot.user.name} - Ticket Panel", icon_url=self.bot.user.avatar.url) message = await channel.send(embed=panel, view=TicketPanelView(self.bot)) try: await ctx.author.send(embed=discord.Embed(description=f"**Panel id** of the panel you just created in <#{channel.id}>: `{message.id}`", color=discord.Color.green())) except discord.Forbidden: print("Couldn't DM that user!") if channel != ctx.channel: panel1 = discord.Embed( title=name, description="To create a ticket react with 📩", color=discord.Color.green(), ) panel1.set_footer(text=f"{self.bot.user.name} - Ticket Panel", icon_url=self.bot.user.avatar.url) message = await channel.send(embed=panel1, view=TicketPanelView(self.bot)) embed2 = discord.Embed(description=f"**<:tick:897382645321850920> Successfully posted the panel in {channel.mention}\n\nPanel ID: `{message.id}`**", color=discord.Color.green()) await ctx.send(embed=embed2)
async def create_(self, ctx: commands.Context, channel: discord.TextChannel, *, name = None): """Creates a panel in a channel through which users can interact and open tickets""" if not channel: embed = discord.Embed( description="**<:error:897382665781669908> Please enter a channel to make the panel in!**", color=discord.Color.red() ) return await ctx.send(embed=embed) if not name: embed = discord.Embed( description="**<:error:897382665781669908> Please enter a name!**", color=discord.Color.red() ) return await ctx.send(embed=embed) if not ctx.author.guild_permissions.manage_channels: embed = discord.Embed( description="**<:error:897382665781669908> You can't do that!**", color=discord.Color.red() ) return await ctx.send(embed=embed) if channel == ctx.channel: panel = discord.Embed( title=name, description="To create a ticket react with 📩", color=discord.Color.green(), ) panel.set_footer(text=f"{self.bot.user.name} - Ticket Panel", icon_url=self.bot.user.avatar.url) message = await channel.send(embed=panel, view=TicketPanelView(self.bot)) try: await ctx.author.send(embed=discord.Embed(description=f"**Panel id** of the panel you just created in <#{channel.id}>: `{message.id}`", color=discord.Color.green())) except discord.Forbidden: print("Couldn't DM that user!") if channel != ctx.channel: panel1 = discord.Embed( title=name, description="To create a ticket react with 📩", color=discord.Color.green(), ) panel1.set_footer(text=f"{self.bot.user.name} - Ticket Panel", icon_url=self.bot.user.avatar.url) message = await channel.send(embed=panel1, view=TicketPanelView(self.bot)) embed2 = discord.Embed(description=f"**<:tick:897382645321850920> Successfully posted the panel in {channel.mention}\n\nPanel ID: `{message.id}`**", color=discord.Color.green()) await ctx.send(embed=embed2)
Python
async def delete_(self, ctx: commands.Context, channel: discord.TextChannel, panel_id: int): """Deletes a previously built panel in the server. Requires the `panel_id` which is provided at the time of the creation of the panel""" message = await channel.fetch_message(panel_id) try: await message.delete() embed = discord.Embed(description="**<:tick:897382645321850920> Successfully deleted the panel!**", color=discord.Color.green()) await ctx.send(embed=embed) except discord.Forbidden: embed = discord.Embed(description="**<:error:897382665781669908> I couldn't do that!**", color=discord.Color.green()) await ctx.send(embed=embed) except discord.NotFound: embed = discord.Embed(description=f"**<:error:897382665781669908> I couldn't find a panel with id `{panel_id}`! Please try again after checking the id!**") await ctx.send(embed=embed)
async def delete_(self, ctx: commands.Context, channel: discord.TextChannel, panel_id: int): """Deletes a previously built panel in the server. Requires the `panel_id` which is provided at the time of the creation of the panel""" message = await channel.fetch_message(panel_id) try: await message.delete() embed = discord.Embed(description="**<:tick:897382645321850920> Successfully deleted the panel!**", color=discord.Color.green()) await ctx.send(embed=embed) except discord.Forbidden: embed = discord.Embed(description="**<:error:897382665781669908> I couldn't do that!**", color=discord.Color.green()) await ctx.send(embed=embed) except discord.NotFound: embed = discord.Embed(description=f"**<:error:897382665781669908> I couldn't find a panel with id `{panel_id}`! Please try again after checking the id!**") await ctx.send(embed=embed)
Python
async def edit_(self, ctx: commands.Context, channel: discord.TextChannel, panel_id: int, *, name: str): """Edits a previously built panel in the server. Requires the `panel_id` which is provided at the time of the creation of the panel""" message = await channel.fetch_message(panel_id) try: embed1 = discord.Embed(title=name, description="To create a ticket react with 📩", color=discord.Color.green()) await message.edit(embed=embed1) embed = discord.Embed(description="**<:tick:897382645321850920> Successfully edited the panel!**", color=discord.Color.green()) await ctx.send(embed=embed) except discord.Forbidden: embed = discord.Embed(description="**<:error:897382665781669908> I couldn't do that!**", color=discord.Color.green()) await ctx.send(embed=embed) except discord.NotFound: embed = discord.Embed(description=f"**<:error:897382665781669908> I couldn't find a panel with id `{panel_id}`! Please try again after checking the id!**") await ctx.send(embed=embed)
async def edit_(self, ctx: commands.Context, channel: discord.TextChannel, panel_id: int, *, name: str): """Edits a previously built panel in the server. Requires the `panel_id` which is provided at the time of the creation of the panel""" message = await channel.fetch_message(panel_id) try: embed1 = discord.Embed(title=name, description="To create a ticket react with 📩", color=discord.Color.green()) await message.edit(embed=embed1) embed = discord.Embed(description="**<:tick:897382645321850920> Successfully edited the panel!**", color=discord.Color.green()) await ctx.send(embed=embed) except discord.Forbidden: embed = discord.Embed(description="**<:error:897382665781669908> I couldn't do that!**", color=discord.Color.green()) await ctx.send(embed=embed) except discord.NotFound: embed = discord.Embed(description=f"**<:error:897382665781669908> I couldn't find a panel with id `{panel_id}`! Please try again after checking the id!**") await ctx.send(embed=embed)
Python
async def reset_(self, ctx: commands.Context): """Resets the ticket count set of the server""" embed = discord.Embed(description=f"Are you sure you want to reset the **Ticket Count**?\n------------------------------------------------\nRespond Within **15** seconds!", color=discord.Color.orange()) message = await ctx.send(embed=embed) await message.edit(embed=embed, view=TicketResetView(ctx, message, self.bot))
async def reset_(self, ctx: commands.Context): """Resets the ticket count set of the server""" embed = discord.Embed(description=f"Are you sure you want to reset the **Ticket Count**?\n------------------------------------------------\nRespond Within **15** seconds!", color=discord.Color.orange()) message = await ctx.send(embed=embed) await message.edit(embed=embed, view=TicketResetView(ctx, message, self.bot))
Python
async def category_(self, ctx: commands.Context, categoryID: int=None): """Sets the category for tickets. Highly reccomended.""" try: if categoryID is None: self.bot.dbcursor.execute(f'SELECT category FROM ticket WHERE guild_id=?', (ctx.guild.id,)) dataCheck = self.bot.dbcursor.fetchone() if not dataCheck: return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> You have not assigned a category to tickets yet**", color=discord.Color.red())) self.bot.dbcursor.execute(f'SELECT * FROM ticket WHERE guild_id=?', (ctx.guild.id,)) categoryFind = self.bot.dbcursor.fetchone() cat = categoryFind[2] return await ctx.send(embed=discord.Embed(description=f"**The category_id set for this server is {cat}**", color=discord.Color.green())) self.bot.dbcursor.execute(f'SELECT category FROM ticket WHERE guild_id=?', (ctx.guild.id,)) data = self.bot.dbcursor.fetchone() if not data: self.bot.dbcursor.execute(f'SELECT * FROM ticket WHERE guild_id=?', (ctx.guild.id,)) dataCheck2 = self.bot.dbcursor.fetchone() if not dataCheck2[0]: self.bot.dbcursor.execute(f'INSERT INTO ticket (guild_id, category) VALUES(?,?)', (ctx.guild.id, categoryID)) else: self.bot.dbcursor.execute(f'INSERT INTO ticket (category) VALUES(?) WHERE guild_id=?', (categoryID, ctx.guild.id)) if data: self.bot.dbcursor.execute(f'UPDATE ticket SET category = ? WHERE guild_id=?', (categoryID, ctx.guild.id)) self.bot.db.commit() category = discord.utils.get(ctx.guild.categories, id=categoryID) embed = discord.Embed(description=f"**<:tick:897382645321850920> Successfully added `{category}` as the ticket category!\n\nIf you want to keep ticket view permissions, make sure to change the category permissions.**", color=discord.Color.green()) await ctx.send(embed=embed) except Exception as e: print(e)
async def category_(self, ctx: commands.Context, categoryID: int=None): """Sets the category for tickets. Highly reccomended.""" try: if categoryID is None: self.bot.dbcursor.execute(f'SELECT category FROM ticket WHERE guild_id=?', (ctx.guild.id,)) dataCheck = self.bot.dbcursor.fetchone() if not dataCheck: return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> You have not assigned a category to tickets yet**", color=discord.Color.red())) self.bot.dbcursor.execute(f'SELECT * FROM ticket WHERE guild_id=?', (ctx.guild.id,)) categoryFind = self.bot.dbcursor.fetchone() cat = categoryFind[2] return await ctx.send(embed=discord.Embed(description=f"**The category_id set for this server is {cat}**", color=discord.Color.green())) self.bot.dbcursor.execute(f'SELECT category FROM ticket WHERE guild_id=?', (ctx.guild.id,)) data = self.bot.dbcursor.fetchone() if not data: self.bot.dbcursor.execute(f'SELECT * FROM ticket WHERE guild_id=?', (ctx.guild.id,)) dataCheck2 = self.bot.dbcursor.fetchone() if not dataCheck2[0]: self.bot.dbcursor.execute(f'INSERT INTO ticket (guild_id, category) VALUES(?,?)', (ctx.guild.id, categoryID)) else: self.bot.dbcursor.execute(f'INSERT INTO ticket (category) VALUES(?) WHERE guild_id=?', (categoryID, ctx.guild.id)) if data: self.bot.dbcursor.execute(f'UPDATE ticket SET category = ? WHERE guild_id=?', (categoryID, ctx.guild.id)) self.bot.db.commit() category = discord.utils.get(ctx.guild.categories, id=categoryID) embed = discord.Embed(description=f"**<:tick:897382645321850920> Successfully added `{category}` as the ticket category!\n\nIf you want to keep ticket view permissions, make sure to change the category permissions.**", color=discord.Color.green()) await ctx.send(embed=embed) except Exception as e: print(e)
Python
async def add(self, ctx: commands.Context, user: discord.Member): """Adds a user in the ticket""" self.bot.dbcursor.execute(f'SELECT * FROM tickets WHERE guild_id=? AND channel_id=?', (ctx.guild.id, ctx.channel.id)) data = self.bot.dbcursor.fetchone() if ctx.channel.id != data[1]: await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> Looks like either this channel is not a ticket channel or you aren't in the same channel**", color=discord.Color.red())) if user in ctx.channel.members: return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> That user is already in the ticket**", color=discord.Color.red)) channel: discord.TextChannel = ctx.channel perms = channel.overwrites_for(user) perms.view_channel = True perms.send_messages = True perms.read_message_history = True await channel.set_permissions(user, overwrite=perms) embed = discord.Embed(description=f"**<:tick:897382645321850920> Successfully added {user.mention} in the ticket!**", color=discord.Color.green()) await ctx.send(embed=embed)
async def add(self, ctx: commands.Context, user: discord.Member): """Adds a user in the ticket""" self.bot.dbcursor.execute(f'SELECT * FROM tickets WHERE guild_id=? AND channel_id=?', (ctx.guild.id, ctx.channel.id)) data = self.bot.dbcursor.fetchone() if ctx.channel.id != data[1]: await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> Looks like either this channel is not a ticket channel or you aren't in the same channel**", color=discord.Color.red())) if user in ctx.channel.members: return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> That user is already in the ticket**", color=discord.Color.red)) channel: discord.TextChannel = ctx.channel perms = channel.overwrites_for(user) perms.view_channel = True perms.send_messages = True perms.read_message_history = True await channel.set_permissions(user, overwrite=perms) embed = discord.Embed(description=f"**<:tick:897382645321850920> Successfully added {user.mention} in the ticket!**", color=discord.Color.green()) await ctx.send(embed=embed)
Python
async def remove(self, ctx: commands.Context, user: discord.Member): """Removes a user from a ticket. Note: It can't be the user who created the ticket or a person with admin""" self.bot.dbcursor.execute(f'SELECT * FROM tickets WHERE guild_id=? AND channel_id=?', (ctx.guild.id, ctx.channel.id)) data = self.bot.dbcursor.fetchone() if ctx.channel.id != data[1]: await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> Looks like either this channel is not a ticket channel or you aren't in the same channel**", color=discord.Color.red())) if user.id == data[2]: embed2 = discord.Embed(description=f"**<:error:897382665781669908> {user.mention} is the one who opened a ticket\nYou can't remove them from the ticket!**", color=discord.Color.red()) await ctx.send(embed=embed2) if user.guild_permissions.administrator or user.guild_permissions.manage_channels: return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> That user is a *MOD/ADMIN*.**", color=discord.Color.red())) if not user in ctx.channel.members: return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> That user is already not in the ticket**", color=discord.Color.red)) channel: discord.TextChannel = ctx.channel perms = channel.overwrites_for(user) perms.view_channel = False perms.send_messages = False perms.read_message_history = False await channel.set_permissions(user, overwrite=perms) embed = discord.Embed(description=f"**<:tick:897382645321850920> Successfully removed {user.mention} from the ticket!**", color=discord.Color.green()) await ctx.send(embed=embed)
async def remove(self, ctx: commands.Context, user: discord.Member): """Removes a user from a ticket. Note: It can't be the user who created the ticket or a person with admin""" self.bot.dbcursor.execute(f'SELECT * FROM tickets WHERE guild_id=? AND channel_id=?', (ctx.guild.id, ctx.channel.id)) data = self.bot.dbcursor.fetchone() if ctx.channel.id != data[1]: await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> Looks like either this channel is not a ticket channel or you aren't in the same channel**", color=discord.Color.red())) if user.id == data[2]: embed2 = discord.Embed(description=f"**<:error:897382665781669908> {user.mention} is the one who opened a ticket\nYou can't remove them from the ticket!**", color=discord.Color.red()) await ctx.send(embed=embed2) if user.guild_permissions.administrator or user.guild_permissions.manage_channels: return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> That user is a *MOD/ADMIN*.**", color=discord.Color.red())) if not user in ctx.channel.members: return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> That user is already not in the ticket**", color=discord.Color.red)) channel: discord.TextChannel = ctx.channel perms = channel.overwrites_for(user) perms.view_channel = False perms.send_messages = False perms.read_message_history = False await channel.set_permissions(user, overwrite=perms) embed = discord.Embed(description=f"**<:tick:897382645321850920> Successfully removed {user.mention} from the ticket!**", color=discord.Color.green()) await ctx.send(embed=embed)
Python
async def role(self, ctx: commands.Context, switch: str, *, role: discord.Role): """Adds a role or removes the role from a server.\nExample: `ticket role add @SOMEROLE` `ticket role remove remove @SOMEROLE`""" self.bot.dbcursor.execute(f'SELECT * FROM tickets WHERE guild_id=? AND channel_id=?', (ctx.guild.id, ctx.channel.id)) data = self.bot.dbcursor.fetchone() if ctx.channel.id != data[1]: await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> Looks like either this channel is not a ticket channel or you aren't in the same channel**", color=discord.Color.red())) if switch.lower() == "add": channel: discord.Channel = ctx.channel perms = channel.overwrites_for(role) perms.view_channel = True perms.send_messages = True perms.read_message_history = True await channel.set_permissions(role, overwrite=perms) embed = discord.Embed(description=f"**<:tick:897382645321850920> Successfully added {role.mention} in the ticket!**", color=discord.Color.green()) await ctx.send(embed=embed) if switch.lower() == "remove": channel: discord.Channel = ctx.channel perms = channel.overwrites_for(role) perms.view_channel = False perms.send_messages = False perms.read_message_history = False await channel.set_permissions(role, overwrite=perms) embed = discord.Embed(description=f"**<:tick:897382645321850920> Successfully added {role.mention} in the ticket!**", color=discord.Color.green()) await ctx.send(embed=embed)
async def role(self, ctx: commands.Context, switch: str, *, role: discord.Role):
    """Adds a role or removes the role from a server.\nExample: `ticket role add @SOMEROLE` `ticket role remove remove @SOMEROLE`"""
    self.bot.dbcursor.execute(f'SELECT * FROM tickets WHERE guild_id=? AND channel_id=?', (ctx.guild.id, ctx.channel.id))
    data = self.bot.dbcursor.fetchone()
    if ctx.channel.id != data[1]:
        # Bug fix: previously fell through and edited permissions anyway.
        return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> Looks like either this channel is not a ticket channel or you aren't in the same channel**", color=discord.Color.red()))
    if switch.lower() == "add":
        # Grant the role access to the ticket channel.
        channel: discord.TextChannel = ctx.channel
        perms = channel.overwrites_for(role)
        perms.view_channel = True
        perms.send_messages = True
        perms.read_message_history = True
        await channel.set_permissions(role, overwrite=perms)
        embed = discord.Embed(description=f"**<:tick:897382645321850920> Successfully added {role.mention} in the ticket!**", color=discord.Color.green())
        await ctx.send(embed=embed)
    elif switch.lower() == "remove":
        # Revoke the role's access to the ticket channel.
        channel: discord.TextChannel = ctx.channel
        perms = channel.overwrites_for(role)
        perms.view_channel = False
        perms.send_messages = False
        perms.read_message_history = False
        await channel.set_permissions(role, overwrite=perms)
        # Bug fix: the remove branch previously reported "Successfully added".
        embed = discord.Embed(description=f"**<:tick:897382645321850920> Successfully removed {role.mention} from the ticket!**", color=discord.Color.green())
        await ctx.send(embed=embed)
Python
async def covid_(self, ctx: commands.Context):
    """Command group to get covid stats use `covid` for more info"""
    # Only show the overview embed when no subcommand was invoked.
    if ctx.invoked_subcommand is not None:
        return
    overview = discord.Embed(
        title="Covid Info",
        description="**__Commands__:** \n-->`global`:\nGets Global covid info\naliases: `world` `all`\n\n-->`country` \nDirectly type the country you want.\nExample: \n`+covid country India`\n`+covid country USA`",
        color=discord.Color.green(),
    )
    await ctx.send(embed=overview)
async def covid_(self, ctx: commands.Context):
    """Command group to get covid stats use `covid` for more info"""
    # Only show the overview embed when no subcommand was invoked.
    if ctx.invoked_subcommand is not None:
        return
    overview = discord.Embed(
        title="Covid Info",
        description="**__Commands__:** \n-->`global`:\nGets Global covid info\naliases: `world` `all`\n\n-->`country` \nDirectly type the country you want.\nExample: \n`+covid country India`\n`+covid country USA`",
        color=discord.Color.green(),
    )
    await ctx.send(embed=overview)
Python
async def country_(self, ctx, *, country: str):
    """Get covid stats of a country\nExample: `covid country India` and `covid country USA`"""
    em = discord.Embed(description="**Fetching information <a:loading:911568431315292211>**", color=discord.Color.green())
    message = await ctx.send(embed=em)
    response = await self.bot.session.get(f"https://coronavirus-19-api.herokuapp.com/countries/{country}")
    json_stats = await response.json()
    # (embed field name, API response key) pairs, in display order.
    field_map = [
        ("**Total Cases**", "cases"),
        ("**Today Cases**", "todayCases"),
        ("**Total Deaths**", "deaths"),
        ("**Today Deaths**", "todayDeaths"),
        ("**Recovered**", "recovered"),
        ("**Active**", "active"),
        ("**Critical**", "critical"),
        ("**Cases Per One Million**", "casesPerOneMillion"),
        ("**Deaths Per One Million**", "deathsPerOneMillion"),
        ("**Total Tests**", "totalTests"),
        ("**Tests Per One Million**", "testsPerOneMillion"),
    ]
    embed = discord.Embed(title=f"**COVID-19 Status Of {json_stats['country']}**!", description="This Information Isn't Live Always, Hence It May Not Be Accurate!", colour=discord.Color.random())
    for field_name, key in field_map:
        embed.add_field(name=field_name, value=json_stats[key], inline=True)
    embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/564520348821749766/701422183217365052/2Q.png")
    embed.set_footer(text=f"Requested by {ctx.author.name}", icon_url=ctx.author.avatar.url)
    # Replace the "fetching" placeholder with the final stats embed.
    await message.edit(embed=embed)
async def country_(self, ctx, *, country: str):
    """Get covid stats of a country\nExample: `covid country India` and `covid country USA`"""
    em = discord.Embed(description="**Fetching information <a:loading:911568431315292211>**", color=discord.Color.green())
    message = await ctx.send(embed=em)
    response = await self.bot.session.get(f"https://coronavirus-19-api.herokuapp.com/countries/{country}")
    json_stats = await response.json()
    # (embed field name, API response key) pairs, in display order.
    field_map = [
        ("**Total Cases**", "cases"),
        ("**Today Cases**", "todayCases"),
        ("**Total Deaths**", "deaths"),
        ("**Today Deaths**", "todayDeaths"),
        ("**Recovered**", "recovered"),
        ("**Active**", "active"),
        ("**Critical**", "critical"),
        ("**Cases Per One Million**", "casesPerOneMillion"),
        ("**Deaths Per One Million**", "deathsPerOneMillion"),
        ("**Total Tests**", "totalTests"),
        ("**Tests Per One Million**", "testsPerOneMillion"),
    ]
    embed = discord.Embed(title=f"**COVID-19 Status Of {json_stats['country']}**!", description="This Information Isn't Live Always, Hence It May Not Be Accurate!", colour=discord.Color.random())
    for field_name, key in field_map:
        embed.add_field(name=field_name, value=json_stats[key], inline=True)
    embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/564520348821749766/701422183217365052/2Q.png")
    embed.set_footer(text=f"Requested by {ctx.author.name}", icon_url=ctx.author.avatar.url)
    # Replace the "fetching" placeholder with the final stats embed.
    await message.edit(embed=embed)
Python
async def global_(self, ctx):
    """Gets the global Covid-19 INFO"""
    em = discord.Embed(description="**Fetching information <a:loading:911568431315292211>**", color=discord.Color.green())
    message = await ctx.send(embed=em)
    # The API exposes worldwide totals under the pseudo-country "world".
    response = await self.bot.session.get(f"https://coronavirus-19-api.herokuapp.com/countries/world")
    json_stats = await response.json()
    # (embed field name, API response key) pairs, in display order.
    field_map = [
        ("**Total Cases**", "cases"),
        ("**Today Cases**", "todayCases"),
        ("**Total Deaths**", "deaths"),
        ("**Today Deaths**", "todayDeaths"),
        ("**Recovered**", "recovered"),
        ("**Active**", "active"),
        ("**Critical**", "critical"),
        ("**Cases Per One Million**", "casesPerOneMillion"),
        ("**Deaths Per One Million**", "deathsPerOneMillion"),
        ("**Total Tests**", "totalTests"),
        ("**Tests Per One Million**", "testsPerOneMillion"),
    ]
    embed = discord.Embed(title=f"**Global Covid 19 Info**!", description="This Information Isn't Live Always, Hence It May Not Be Accurate!", colour=discord.Color.random())
    for field_name, key in field_map:
        embed.add_field(name=field_name, value=json_stats[key], inline=True)
    embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/564520348821749766/701422183217365052/2Q.png")
    embed.set_footer(text=f"Requested by {ctx.author.name}", icon_url=ctx.author.avatar.url)
    # Replace the "fetching" placeholder with the final stats embed.
    await message.edit(embed=embed)
async def global_(self, ctx):
    """Gets the global Covid-19 INFO"""
    em = discord.Embed(description="**Fetching information <a:loading:911568431315292211>**", color=discord.Color.green())
    message = await ctx.send(embed=em)
    # The API exposes worldwide totals under the pseudo-country "world".
    response = await self.bot.session.get(f"https://coronavirus-19-api.herokuapp.com/countries/world")
    json_stats = await response.json()
    # (embed field name, API response key) pairs, in display order.
    field_map = [
        ("**Total Cases**", "cases"),
        ("**Today Cases**", "todayCases"),
        ("**Total Deaths**", "deaths"),
        ("**Today Deaths**", "todayDeaths"),
        ("**Recovered**", "recovered"),
        ("**Active**", "active"),
        ("**Critical**", "critical"),
        ("**Cases Per One Million**", "casesPerOneMillion"),
        ("**Deaths Per One Million**", "deathsPerOneMillion"),
        ("**Total Tests**", "totalTests"),
        ("**Tests Per One Million**", "testsPerOneMillion"),
    ]
    embed = discord.Embed(title=f"**Global Covid 19 Info**!", description="This Information Isn't Live Always, Hence It May Not Be Accurate!", colour=discord.Color.random())
    for field_name, key in field_map:
        embed.add_field(name=field_name, value=json_stats[key], inline=True)
    embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/564520348821749766/701422183217365052/2Q.png")
    embed.set_footer(text=f"Requested by {ctx.author.name}", icon_url=ctx.author.avatar.url)
    # Replace the "fetching" placeholder with the final stats embed.
    await message.edit(embed=embed)
Python
async def bump_(self, ctx: commands.Context, switch: str):
    """Turns Bump reminder on in the server"""
    self.bot.dbcursor.execute(f'SELECT bump FROM settings WHERE guild_id=?', (ctx.guild.id,))
    data = self.bot.dbcursor.fetchone()
    if not data:
        # First time this guild touches the setting: seed a row defaulting to off.
        self.bot.dbcursor.execute('INSERT INTO settings (guild_id, bump) VALUES(?,?)', (ctx.guild.id, "off"))
    choice = switch.lower()
    if choice in ("on", "enable", "yes"):
        self.bot.dbcursor.execute(f'UPDATE settings SET bump = "on" WHERE guild_id=?', (ctx.guild.id,))
        await ctx.send(embed=discord.Embed(description="**<:tick:897382645321850920> Enabled `bump reminding` service!**", color=discord.Color.green()))
    elif choice in ("off", "disable", "no"):
        self.bot.dbcursor.execute(f'UPDATE settings SET bump = "off" WHERE guild_id=?', (ctx.guild.id,))
        await ctx.send(embed=discord.Embed(description="**<:tick:897382645321850920> Disabled `bump reminding` service!**", color=discord.Color.green()))
async def bump_(self, ctx: commands.Context, switch: str):
    """Turns Bump reminder on in the server"""
    self.bot.dbcursor.execute(f'SELECT bump FROM settings WHERE guild_id=?', (ctx.guild.id,))
    data = self.bot.dbcursor.fetchone()
    if not data:
        # First time this guild touches the setting: seed a row defaulting to off.
        self.bot.dbcursor.execute('INSERT INTO settings (guild_id, bump) VALUES(?,?)', (ctx.guild.id, "off"))
    choice = switch.lower()
    if choice in ("on", "enable", "yes"):
        self.bot.dbcursor.execute(f'UPDATE settings SET bump = "on" WHERE guild_id=?', (ctx.guild.id,))
        await ctx.send(embed=discord.Embed(description="**<:tick:897382645321850920> Enabled `bump reminding` service!**", color=discord.Color.green()))
    elif choice in ("off", "disable", "no"):
        self.bot.dbcursor.execute(f'UPDATE settings SET bump = "off" WHERE guild_id=?', (ctx.guild.id,))
        await ctx.send(embed=discord.Embed(description="**<:tick:897382645321850920> Disabled `bump reminding` service!**", color=discord.Color.green()))
Python
async def memberCheck(guild: discord.Guild) -> List[int]:
    """Returns the memberList which contains memberIDs of all members combined"""
    # Collect every member id in one pass over the guild's member cache.
    return [member.id for member in guild.members]
async def memberCheck(guild: discord.Guild) -> List[int]:
    """Returns the memberList which contains memberIDs of all members combined"""
    # Collect every member id in one pass over the guild's member cache.
    return [member.id for member in guild.members]
Python
async def help(ctx: discord.ApplicationContext):
    """Get help about the most feature packed bot!!"""
    await ctx.respond(embed=Help_Embed(), view=HelpOptions())
    message = await ctx.interaction.original_message()
    # Keep the help session alive for two minutes, then deactivate it.
    await asyncio.sleep(120)
    try:
        await message.edit("This help session expired", embed=None, view=None)
    except discord.HTTPException:
        # Bug fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        # The edit can legitimately fail if the message was deleted meanwhile.
        pass
async def help(ctx: discord.ApplicationContext):
    """Get help about the most feature packed bot!!"""
    await ctx.respond(embed=Help_Embed(), view=HelpOptions())
    message = await ctx.interaction.original_message()
    # Keep the help session alive for two minutes, then deactivate it.
    await asyncio.sleep(120)
    try:
        await message.edit("This help session expired", embed=None, view=None)
    except discord.HTTPException:
        # Bug fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        # The edit can legitimately fail if the message was deleted meanwhile.
        pass
Python
async def tictactoe_(self, ctx: commands.Context, user: discord.Member):
    """Play a tic-tac-toe Game with someone online"""
    if user is None:
        # Bug fix: the custom-emoji tag was malformed ("...908?" instead of "...908>").
        return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> You can't play tic-tac-toe alone!**", color=discord.Color.red()))
    if user.bot:
        return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> You can't play with a bot!**", color=discord.Color.red()))
    # Map each player's id to their opponent's, then pick who goes first at random.
    players = {
        str(ctx.author.id): str(user.id),
        str(user.id): str(ctx.author.id)
    }
    player1 = random.choice(list(players.keys()))
    player2 = players[player1]
    msg = await ctx.send(f"{ctx.guild.get_member(int(player1)).mention}\'s turn (X)")
    # Attach the game view after the message exists (the view needs the message).
    await msg.edit(view=TicTacToe(
        player1=ctx.guild.get_member(int(player1)),
        player2=ctx.guild.get_member(int(player2)),
        message=msg
    ))
async def tictactoe_(self, ctx: commands.Context, user: discord.Member):
    """Play a tic-tac-toe Game with someone online"""
    if user is None:
        # Bug fix: the custom-emoji tag was malformed ("...908?" instead of "...908>").
        return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> You can't play tic-tac-toe alone!**", color=discord.Color.red()))
    if user.bot:
        return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> You can't play with a bot!**", color=discord.Color.red()))
    # Map each player's id to their opponent's, then pick who goes first at random.
    players = {
        str(ctx.author.id): str(user.id),
        str(user.id): str(ctx.author.id)
    }
    player1 = random.choice(list(players.keys()))
    player2 = players[player1]
    msg = await ctx.send(f"{ctx.guild.get_member(int(player1)).mention}\'s turn (X)")
    # Attach the game view after the message exists (the view needs the message).
    await msg.edit(view=TicTacToe(
        player1=ctx.guild.get_member(int(player1)),
        player2=ctx.guild.get_member(int(player2)),
        message=msg
    ))
Python
async def beer_(self, ctx: commands.Context, user: discord.Member=None):
    """Have a beer with yourself or someone else\nExample: `beer` for having a drink and `beer [user]` to have a drink with someone"""
    # Default to the invoker when no partner is given.
    target = user if user else ctx.author
    if target.bot:
        await ctx.send(embed=discord.Embed(description=f"**Hey {ctx.author.mention},\nbots don't have beer!\n--------------------------\nTry having it with a human!**", color=discord.Color.red()))
        return
    if target == ctx.author:
        # Drinking alone — no invitation flow needed.
        await ctx.send(embed=discord.Embed(description=f"**{ctx.author.mention} is having a great time drinking lonely! :beer:**", color=discord.Color.green()))
        return
    invite = discord.Embed(description=f"**{ctx.author.mention} has invited {target.mention} for having a drink together :beers:!\n------------------------------------\nWould you like to join {target.mention}?**", color=discord.Color.orange())
    message = await ctx.send(content=f"{target.mention}", embed=invite)
    # Attach the accept/decline view once the message exists.
    await message.edit(embed=invite, view=BeerView(target, ctx, message))
async def beer_(self, ctx: commands.Context, user: discord.Member=None):
    """Have a beer with yourself or someone else\nExample: `beer` for having a drink and `beer [user]` to have a drink with someone"""
    # Default to the invoker when no partner is given.
    target = user if user else ctx.author
    if target.bot:
        await ctx.send(embed=discord.Embed(description=f"**Hey {ctx.author.mention},\nbots don't have beer!\n--------------------------\nTry having it with a human!**", color=discord.Color.red()))
        return
    if target == ctx.author:
        # Drinking alone — no invitation flow needed.
        await ctx.send(embed=discord.Embed(description=f"**{ctx.author.mention} is having a great time drinking lonely! :beer:**", color=discord.Color.green()))
        return
    invite = discord.Embed(description=f"**{ctx.author.mention} has invited {target.mention} for having a drink together :beers:!\n------------------------------------\nWould you like to join {target.mention}?**", color=discord.Color.orange())
    message = await ctx.send(content=f"{target.mention}", embed=invite)
    # Attach the accept/decline view once the message exists.
    await message.edit(embed=invite, view=BeerView(target, ctx, message))
Python
async def beerparty_(self, ctx: commands.Context):
    """Have a beerparty in the server. Invite your friends!"""
    invitation = discord.Embed(
        title="Beer Party 🍻",
        description=f"{ctx.author.mention} had invited everyone to join up this beer party :beers:!",
        color=discord.Color.green(),
    )
    message = await ctx.send(embed=invitation)
    # Attach the join view once the message exists (the view needs the message).
    await message.edit(embed=invitation, view=BeerPartyView(message, ctx))
async def beerparty_(self, ctx: commands.Context):
    """Have a beerparty in the server. Invite your friends!"""
    invitation = discord.Embed(
        title="Beer Party 🍻",
        description=f"{ctx.author.mention} had invited everyone to join up this beer party :beers:!",
        color=discord.Color.green(),
    )
    message = await ctx.send(embed=invitation)
    # Attach the join view once the message exists (the view needs the message).
    await message.edit(embed=invitation, view=BeerPartyView(message, ctx))
Python
async def akinator_(self, ctx: commands.Context):
    """Play a game of akinator\nHow to play: Think of a character it can either be a fictional or non-fictional character.\nThe bot will ask questions, just give them the right answer!"""
    await ctx.send(embed=discord.Embed(description="**Yukinator is here to guess!\n--------------------------------\nOptions: y: `yes\n`no: `n`\nidk: `Don't know`\np: `probably`\npn: `probably not`\nb: `previous question`\nq: `quit the game`**", color=discord.Color.green()).set_image(url="https://static.wikia.nocookie.net/video-game-character-database/images/9/9f/Akinator.png/revision/latest?cb=20200817020737"))

    def check(msg):
        # Only accept a valid option string from the invoking user in this channel.
        return (
            msg.author == ctx.author
            and msg.channel == ctx.channel
            and msg.content.lower() in ["y", "n", "idk", "p", "pn", "b", "q"]
        )

    try:
        aki = ak.Akinator()
        q = aki.start_game()
        # Ask questions until the engine is at least 80% confident.
        while aki.progression <= 80:
            await ctx.send(embed=discord.Embed(description=f"**{q}\n\n[`y` | `n` | `idk` | `p` | `pn` | `b` | `q`]**", color=discord.Color.embed_background(theme="dark")))
            try:
                msg = await self.bot.wait_for("message", check=check, timeout=60)
                if msg.content.lower() == "q":
                    await ctx.send(embed=discord.Embed(description="**You have quit the game!**", color=discord.Color.red()))
                    break
                if msg.content.lower() == "b":
                    try:
                        q = aki.back()
                    except ak.CantGoBackAnyFurther as e:
                        # Bug fix: `e` was referenced here without being bound
                        # (the except clause was missing `as e` -> NameError).
                        await ctx.send(embed=discord.Embed(description=f"**<:error:897382665781669908> {e}**"))
                        continue
                else:
                    try:
                        q = aki.answer(msg.content.lower())
                    except ak.InvalidAnswerError as e:
                        await ctx.send(embed=discord.Embed(description=f"**<:error:897382665781669908> {e}**"))
                        continue
            except asyncio.TimeoutError:
                # Typo fix in the user-facing message ("plsying" -> "playing").
                return await ctx.send(embed=discord.Embed(description=f"**<:error:897382665781669908> The game timed-out.. try playing a new one**"))
            except Exception as e:
                await ctx.send(embed=discord.Embed(description=f"**<:error:897382665781669908> An error occured\n`{str(e).capitalize()}`**"))
        aki.win()
        await ctx.send(
            embed=discord.Embed(description=f"**Is it {aki.first_guess['name']}\n({aki.first_guess['description']})!\nWas I correct?(y/n)\n\t**", color=discord.Color.orange()).set_image(url=aki.first_guess['absolute_picture_path'])
        )
        correct = await self.bot.wait_for("message", check=check)
        if correct.content.lower() == "y":
            await ctx.send(embed=discord.Embed(description="**Yay!**", color=discord.Color.green()))
        else:
            await ctx.send(embed=discord.Embed(description="**Oof!**", color=discord.Color.red()))
    except Exception as e:
        await ctx.send(e)
async def akinator_(self, ctx: commands.Context):
    """Play a game of akinator\nHow to play: Think of a character it can either be a fictional or non-fictional character.\nThe bot will ask questions, just give them the right answer!"""
    await ctx.send(embed=discord.Embed(description="**Yukinator is here to guess!\n--------------------------------\nOptions: y: `yes\n`no: `n`\nidk: `Don't know`\np: `probably`\npn: `probably not`\nb: `previous question`\nq: `quit the game`**", color=discord.Color.green()).set_image(url="https://static.wikia.nocookie.net/video-game-character-database/images/9/9f/Akinator.png/revision/latest?cb=20200817020737"))

    def check(msg):
        # Only accept a valid option string from the invoking user in this channel.
        return (
            msg.author == ctx.author
            and msg.channel == ctx.channel
            and msg.content.lower() in ["y", "n", "idk", "p", "pn", "b", "q"]
        )

    try:
        aki = ak.Akinator()
        q = aki.start_game()
        # Ask questions until the engine is at least 80% confident.
        while aki.progression <= 80:
            await ctx.send(embed=discord.Embed(description=f"**{q}\n\n[`y` | `n` | `idk` | `p` | `pn` | `b` | `q`]**", color=discord.Color.embed_background(theme="dark")))
            try:
                msg = await self.bot.wait_for("message", check=check, timeout=60)
                if msg.content.lower() == "q":
                    await ctx.send(embed=discord.Embed(description="**You have quit the game!**", color=discord.Color.red()))
                    break
                if msg.content.lower() == "b":
                    try:
                        q = aki.back()
                    except ak.CantGoBackAnyFurther as e:
                        # Bug fix: `e` was referenced here without being bound
                        # (the except clause was missing `as e` -> NameError).
                        await ctx.send(embed=discord.Embed(description=f"**<:error:897382665781669908> {e}**"))
                        continue
                else:
                    try:
                        q = aki.answer(msg.content.lower())
                    except ak.InvalidAnswerError as e:
                        await ctx.send(embed=discord.Embed(description=f"**<:error:897382665781669908> {e}**"))
                        continue
            except asyncio.TimeoutError:
                # Typo fix in the user-facing message ("plsying" -> "playing").
                return await ctx.send(embed=discord.Embed(description=f"**<:error:897382665781669908> The game timed-out.. try playing a new one**"))
            except Exception as e:
                await ctx.send(embed=discord.Embed(description=f"**<:error:897382665781669908> An error occured\n`{str(e).capitalize()}`**"))
        aki.win()
        await ctx.send(
            embed=discord.Embed(description=f"**Is it {aki.first_guess['name']}\n({aki.first_guess['description']})!\nWas I correct?(y/n)\n\t**", color=discord.Color.orange()).set_image(url=aki.first_guess['absolute_picture_path'])
        )
        correct = await self.bot.wait_for("message", check=check)
        if correct.content.lower() == "y":
            await ctx.send(embed=discord.Embed(description="**Yay!**", color=discord.Color.green()))
        else:
            await ctx.send(embed=discord.Embed(description="**Oof!**", color=discord.Color.red()))
    except Exception as e:
        await ctx.send(e)
Python
async def gayrate(self, ctx, user: discord.Member=None):
    """View your's or someone's gayrate"""
    # Default to the invoker when no target is given.
    target = user if user else ctx.author
    if target.bot:
        return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> How lame of you! | This machine is for humans!\nNot for bots, You fool.**", color=discord.Color.red()))
    percent = random.randint(1, 100)
    if percent >= 90:
        # High rolls get the "machine broke" easter-egg embed with a gif.
        result = discord.Embed(title="Yuki's Gayr8 Machine!", description=f"**The MACHINE Broke :slot_machine:!\n\n{target.mention}**'s gayr8: **{percent}**%", color=discord.Color.embed_background(theme="dark"))
        result.set_image(url="https://media.giphy.com/media/j2es27Xohj5EMK6G8c/giphy.gif")
        return await ctx.send(embed=result)
    result = discord.Embed(title="Yuki's Gayr8 Machine!", description=f"**{target.mention}**'s gayr8: **{percent}**%", color=discord.Color.dark_purple())
    return await ctx.send(embed=result)
async def gayrate(self, ctx, user: discord.Member=None):
    """View your's or someone's gayrate"""
    # Default to the invoker when no target is given.
    target = user if user else ctx.author
    if target.bot:
        return await ctx.send(embed=discord.Embed(description="**<:error:897382665781669908> How lame of you! | This machine is for humans!\nNot for bots, You fool.**", color=discord.Color.red()))
    percent = random.randint(1, 100)
    if percent >= 90:
        # High rolls get the "machine broke" easter-egg embed with a gif.
        result = discord.Embed(title="Yuki's Gayr8 Machine!", description=f"**The MACHINE Broke :slot_machine:!\n\n{target.mention}**'s gayr8: **{percent}**%", color=discord.Color.embed_background(theme="dark"))
        result.set_image(url="https://media.giphy.com/media/j2es27Xohj5EMK6G8c/giphy.gif")
        return await ctx.send(embed=result)
    result = discord.Embed(title="Yuki's Gayr8 Machine!", description=f"**{target.mention}**'s gayr8: **{percent}**%", color=discord.Color.dark_purple())
    return await ctx.send(embed=result)
Python
async def send_command_help(self, command):
    """triggers when a `<prefix>help <command>` is called"""
    try:
        ctx = self.context
        embed = HelpEmbed(
            title=self.get_command_signature(command),
            description=command.help or "No help found...",
        )
        cog = command.cog
        if cog:
            embed.add_field(name="Category", value=cog.qualified_name)
        # NOTE(review): `_buckets`/`_cooldown` are discord.py internals that
        # carry the command's cooldown configuration.
        if command._buckets and (cooldown := command._buckets._cooldown):
            embed.add_field(
                name="Cooldown",
                value=f"{cooldown.rate} per {cooldown.per:.0f} seconds",
            )
        await ctx.send(embed=embed)
    except Exception as e:
        print(e)
async def send_command_help(self, command):
    """triggers when a `<prefix>help <command>` is called"""
    try:
        ctx = self.context
        embed = HelpEmbed(
            title=self.get_command_signature(command),
            description=command.help or "No help found...",
        )
        cog = command.cog
        if cog:
            embed.add_field(name="Category", value=cog.qualified_name)
        # NOTE(review): `_buckets`/`_cooldown` are discord.py internals that
        # carry the command's cooldown configuration.
        if command._buckets and (cooldown := command._buckets._cooldown):
            embed.add_field(
                name="Cooldown",
                value=f"{cooldown.rate} per {cooldown.per:.0f} seconds",
            )
        await ctx.send(embed=embed)
    except Exception as e:
        print(e)
Python
async def botinfo_(self, ctx: commands.Context):
    """View the bot's info"""
    # Resident memory of this process, formatted in megabytes.
    memory = "{:.4} MB".format(psutil.Process().memory_info().rss / 1024 ** 2)
    py_ver = ".".join(str(v) for v in sys.version_info[:3])
    # Seconds since the module-level startTime, rendered as H:MM:SS.
    uptime = str(datetime.timedelta(seconds=int(round(time.time()-startTime))))
    info = discord.Embed(title="Bot Info!", description=f"**Servers**\n{len(list(self.bot.guilds))}\n\n**Users**\n{members(self.bot)}\n\n**System**\n{platform.release()}\n\n**Memory**\n{memory}\n\n**Python Version**\n{py_ver}\n\n**Uptime**\n{uptime}\n\n**Owner/Creator**\n27Saumya", color=discord.Color.green())
    info.set_thumbnail(url=self.bot.user.avatar.url)
    await ctx.send(embed=info)
async def botinfo_(self, ctx: commands.Context):
    """View the bot's info"""
    # Resident memory of this process, formatted in megabytes.
    memory = "{:.4} MB".format(psutil.Process().memory_info().rss / 1024 ** 2)
    py_ver = ".".join(str(v) for v in sys.version_info[:3])
    # Seconds since the module-level startTime, rendered as H:MM:SS.
    uptime = str(datetime.timedelta(seconds=int(round(time.time()-startTime))))
    info = discord.Embed(title="Bot Info!", description=f"**Servers**\n{len(list(self.bot.guilds))}\n\n**Users**\n{members(self.bot)}\n\n**System**\n{platform.release()}\n\n**Memory**\n{memory}\n\n**Python Version**\n{py_ver}\n\n**Uptime**\n{uptime}\n\n**Owner/Creator**\n27Saumya", color=discord.Color.green())
    info.set_thumbnail(url=self.bot.user.avatar.url)
    await ctx.send(embed=info)
Python
async def ping_(self, ctx: commands.Context):
    """View the bot's latency (Edit Latency)"""
    started = time.monotonic()
    message = await ctx.send(embed=discord.Embed(title=":ping_pong:", color=discord.Color.green()))
    # Round-trip time of the send call, in milliseconds.
    elapsed_ms = (time.monotonic() - started) * 1000
    pong = discord.Embed(title=":ping_pong: Pong!", description=f"**Bot latency: `{round(self.bot.latency * 1000)}` ms\n------------------------------\nDiscord Latency: `{int(elapsed_ms)}` ms**", color=discord.Color.green())
    await message.edit(embed=pong)
async def ping_(self, ctx: commands.Context):
    """View the bot's latency (Edit Latency)"""
    started = time.monotonic()
    message = await ctx.send(embed=discord.Embed(title=":ping_pong:", color=discord.Color.green()))
    # Round-trip time of the send call, in milliseconds.
    elapsed_ms = (time.monotonic() - started) * 1000
    pong = discord.Embed(title=":ping_pong: Pong!", description=f"**Bot latency: `{round(self.bot.latency * 1000)}` ms\n------------------------------\nDiscord Latency: `{int(elapsed_ms)}` ms**", color=discord.Color.green())
    await message.edit(embed=pong)
Python
def sum_then_finalize(
    metric_finalizers: model_lib.MetricFinalizersType,
    local_unfinalized_metrics_type: computation_types.StructWithPythonType
) -> computation_base.Computation:
  """Creates a TFF computation that aggregates metrics via `sum_then_finalize`.

  The returned federated TFF computation has the following type signature:
  `local_unfinalized_metrics@CLIENTS -> aggregated_metrics@SERVER`, where the
  input is given by `tff.learning.Model.report_local_unfinalized_metrics()` at
  `CLIENTS`, and the output is computed by first summing the unfinalized
  metrics from `CLIENTS`, followed by applying the finalizers at `SERVER`.

  Args:
    metric_finalizers: An `OrderedDict` of `string` metric names to finalizer
      functions returned by `tff.learning.Model.metric_finalizers()`. It should
      have the same keys (i.e., metric names) as the `OrderedDict` returned by
      `tff.learning.Model.report_local_unfinalized_metrics()`. A finalizer is a
      callable (typically `tf.function` or `tff.tf_computation` decorated
      function) that takes in a metric's unfinalized values, and returns the
      finalized values.
    local_unfinalized_metrics_type: A `tff.types.StructWithPythonType` (with
      `OrderedDict` as the Python container) of a client's local unfinalized
      metrics. Let `local_unfinalized_metrics` be the output of
      `tff.learning.Model.report_local_unfinalized_metrics()`. Its type can be
      obtained by `tff.framework.type_from_tensors(local_unfinalized_metrics)`.

  Returns:
    A federated TFF computation that sums the unfinalized metrics from CLIENTS,
    and applies the corresponding finalizers at SERVER.

  Raises:
    TypeError: If the inputs are of the wrong types.
    ValueError: If the keys (i.e., metric names) in `metric_finalizers` are not
      the same as those expected by `local_unfinalized_metrics_type`.
  """
  # Validate `metric_finalizers`: an OrderedDict of str -> callable.
  py_typecheck.check_type(metric_finalizers, collections.OrderedDict,
                          'metric_finalizers')
  for key, value in metric_finalizers.items():
    py_typecheck.check_type(key, str, f'metric_finalizers key {key}')
    py_typecheck.check_callable(value, f'metric_finalizers value {value}')
  # Directly check the type (instead of using `py_typecheck`) here so that
  # the error message has a better format (specifically, the expected type is
  # shown as `tff.types.StructWithPythonType` in the error message).
  if not isinstance(local_unfinalized_metrics_type,
                    computation_types.StructWithPythonType):
    raise TypeError(
        'Expected the input `local_unfinalized_metrics_type` to be a '
        '`tff.types.StructWithPythonType`, found '
        f'{py_typecheck.type_string(type(local_unfinalized_metrics_type))}.')
  local_metrics_container = local_unfinalized_metrics_type.python_container
  if local_metrics_container is not collections.OrderedDict:
    raise TypeError(
        'Expected the input `local_unfinalized_metrics_type` to be a '
        '`tff.types.StructWithPythonType` with `collections.OrderedDict` as '
        'the Python container, found a `tff.types.StructWithPythonType` with '
        f'Python container {py_typecheck.type_string(local_metrics_container)}.'
    )
  # The finalizer keys and the unfinalized-metric names must match exactly;
  # report both one-sided differences to make the mismatch easy to debug.
  metric_names_in_metric_finalizers = set(metric_finalizers.keys())
  metric_names_in_local_unfinalized_metrics = set(
      structure.name_list(local_unfinalized_metrics_type))
  if (metric_names_in_metric_finalizers !=
      metric_names_in_local_unfinalized_metrics):
    difference_1 = (
        metric_names_in_metric_finalizers -
        metric_names_in_local_unfinalized_metrics)
    difference_2 = (
        metric_names_in_local_unfinalized_metrics -
        metric_names_in_metric_finalizers)
    raise ValueError(
        'The metric names in `metric_finalizers` do not match those in the '
        '`local_unfinalized_metrics`. Metric names in the `metric_finalizers`'
        f'but not the `local_unfinalized_metrics`: {difference_1}. '
        'Metric names in the `local_unfinalized_metrics` but not the '
        f'`metric_finalizers`: {difference_2}.\n'
        'Metrics names in the `metric_finalizers`: '
        f'{metric_names_in_metric_finalizers}. Metric names in the '
        '`local_unfinalized_metrics`: '
        f'{metric_names_in_local_unfinalized_metrics}.')

  @computations.federated_computation(
      computation_types.at_clients(local_unfinalized_metrics_type))
  def aggregator_computation(client_local_unfinalized_metrics):
    # Step 1: element-wise sum of the clients' unfinalized metrics at SERVER.
    unfinalized_metrics_sum = intrinsics.federated_sum(
        client_local_unfinalized_metrics)

    @computations.tf_computation(local_unfinalized_metrics_type)
    def finalizer_computation(unfinalized_metrics):
      # Step 2: apply each metric's finalizer to the summed values, preserving
      # the metric ordering of `metric_finalizers`.
      finalized_metrics = collections.OrderedDict()
      for metric_name, metric_finalizer in metric_finalizers.items():
        finalized_metrics[metric_name] = metric_finalizer(
            unfinalized_metrics[metric_name])
      return finalized_metrics

    return intrinsics.federated_map(finalizer_computation,
                                    unfinalized_metrics_sum)

  return aggregator_computation
def sum_then_finalize(
    metric_finalizers: model_lib.MetricFinalizersType,
    local_unfinalized_metrics_type: computation_types.StructWithPythonType
) -> computation_base.Computation:
  """Creates a TFF computation that aggregates metrics via `sum_then_finalize`.

  The returned federated TFF computation has the type signature
  `local_unfinalized_metrics@CLIENTS -> aggregated_metrics@SERVER`: the input
  is given by `tff.learning.Model.report_local_unfinalized_metrics()` at
  `CLIENTS`, and the output is computed by first summing the unfinalized
  metrics from `CLIENTS`, followed by applying the finalizers at `SERVER`.

  Args:
    metric_finalizers: An `OrderedDict` of `string` metric names to finalizer
      functions returned by `tff.learning.Model.metric_finalizers()`. It should
      have the same keys (i.e., metric names) as the `OrderedDict` returned by
      `tff.learning.Model.report_local_unfinalized_metrics()`. A finalizer is a
      callable (typically a `tf.function` or `tff.tf_computation` decorated
      function) that takes in a metric's unfinalized values and returns the
      finalized values.
    local_unfinalized_metrics_type: A `tff.types.StructWithPythonType` (with
      `OrderedDict` as the Python container) of a client's local unfinalized
      metrics. Let `local_unfinalized_metrics` be the output of
      `tff.learning.Model.report_local_unfinalized_metrics()`. Its type can be
      obtained by `tff.framework.type_from_tensors(local_unfinalized_metrics)`.

  Returns:
    A federated TFF computation that sums the unfinalized metrics from CLIENTS,
    and applies the corresponding finalizers at SERVER.

  Raises:
    TypeError: If the inputs are of the wrong types.
    ValueError: If the keys (i.e., metric names) in `metric_finalizers` are not
      the same as those expected by `local_unfinalized_metrics_type`.
  """
  py_typecheck.check_type(metric_finalizers, collections.OrderedDict,
                          'metric_finalizers')
  for name, finalizer in metric_finalizers.items():
    py_typecheck.check_type(name, str, f'metric_finalizers key {name}')
    py_typecheck.check_callable(finalizer,
                                f'metric_finalizers value {finalizer}')

  # Check the type directly (instead of using `py_typecheck`) so that the
  # error message shows the expected type as `tff.types.StructWithPythonType`.
  if not isinstance(local_unfinalized_metrics_type,
                    computation_types.StructWithPythonType):
    raise TypeError(
        'Expected the input `local_unfinalized_metrics_type` to be a '
        '`tff.types.StructWithPythonType`, found '
        f'{py_typecheck.type_string(type(local_unfinalized_metrics_type))}.')
  local_metrics_container = local_unfinalized_metrics_type.python_container
  if local_metrics_container is not collections.OrderedDict:
    raise TypeError(
        'Expected the input `local_unfinalized_metrics_type` to be a '
        '`tff.types.StructWithPythonType` with `collections.OrderedDict` as '
        'the Python container, found a `tff.types.StructWithPythonType` with '
        f'Python container {py_typecheck.type_string(local_metrics_container)}.'
    )

  # The two key sets must match exactly; report both one-sided differences.
  finalizer_names = set(metric_finalizers.keys())
  unfinalized_metric_names = set(
      structure.name_list(local_unfinalized_metrics_type))
  if finalizer_names != unfinalized_metric_names:
    only_in_finalizers = finalizer_names - unfinalized_metric_names
    only_in_metrics_type = unfinalized_metric_names - finalizer_names
    raise ValueError(
        'The metric names in `metric_finalizers` do not match those in the '
        '`local_unfinalized_metrics`. Metric names in the `metric_finalizers`'
        f'but not the `local_unfinalized_metrics`: {only_in_finalizers}. '
        'Metric names in the `local_unfinalized_metrics` but not the '
        f'`metric_finalizers`: {only_in_metrics_type}.\n'
        'Metrics names in the `metric_finalizers`: '
        f'{finalizer_names}. Metric names in the '
        '`local_unfinalized_metrics`: '
        f'{unfinalized_metric_names}.')

  @computations.federated_computation(
      computation_types.at_clients(local_unfinalized_metrics_type))
  def aggregator_computation(client_local_unfinalized_metrics):
    # Sum the raw (unfinalized) metric values across clients first ...
    unfinalized_metrics_sum = intrinsics.federated_sum(
        client_local_unfinalized_metrics)

    # ... then finalize each metric once, at the server.
    @computations.tf_computation(local_unfinalized_metrics_type)
    def finalizer_computation(unfinalized_metrics):
      finalized_metrics = collections.OrderedDict()
      for metric_name, metric_finalizer in metric_finalizers.items():
        finalized_metrics[metric_name] = metric_finalizer(
            unfinalized_metrics[metric_name])
      return finalized_metrics

    return intrinsics.federated_map(finalizer_computation,
                                    unfinalized_metrics_sum)

  return aggregator_computation
# (removed stray "Python" code-fence language marker left by extraction)
def build_proximal_client_update_with_tff_optimizer(
    model_fn,
    proximal_strength: float,
    use_experimental_simulation_loop: bool = False):
  """Creates client update logic in FedProx using a TFF optimizer.

  In contrast to using a `tf.keras.optimizers.Optimizer`, we avoid creating
  `tf.Variable`s associated with the optimizer state within the scope of the
  client work, as they are not necessary. This also means that the client's
  model weights are updated by computing `optimizer.next` and then assigning
  the result to the model weights (while a `tf.keras.optimizers.Optimizer`
  will modify the model weight in place using `optimizer.apply_gradients`).

  Args:
    model_fn: A no-arg callable returning a `tff.learning.Model`.
    proximal_strength: A nonnegative float representing the parameter of
      FedProx's regularization term. When set to `0`, the client update
      reduces to that of FedAvg. Higher values prevent clients from moving too
      far from the server model during local training.
    use_experimental_simulation_loop: Controls the reduce loop function for
      the input dataset. An experimental reduce loop is used for simulation.

  Returns:
    A `tf.function`.
  """
  model = model_fn()
  dataset_reduce_fn = dataset_reduce.build_dataset_reduce_fn(
      use_experimental_simulation_loop)

  @tf.function
  def client_update(optimizer, initial_weights, data):
    model_weights = model_utils.ModelWeights.from_model(model)
    # Begin local training from the broadcast (initial) weights.
    tf.nest.map_structure(lambda a, b: a.assign(b), model_weights,
                          initial_weights)

    def reduce_fn(state, batch):
      """Trains a `tff.learning.Model` on a batch of data."""
      num_examples_sum, optimizer_state = state
      with tf.GradientTape() as tape:
        output = model.forward_pass(batch, training=True)
      gradients = tape.gradient(output.loss, model_weights.trainable)
      # FedProx: penalize drift from the initial weights by adding
      # proximal_strength * (w - w_initial) to the loss gradients.
      drift = tf.nest.map_structure(tf.subtract, model_weights.trainable,
                                    initial_weights.trainable)
      proximal_term = tf.nest.map_structure(lambda x: proximal_strength * x,
                                            drift)
      gradients = tf.nest.map_structure(tf.add, gradients, proximal_term)
      # The TFF optimizer is functional: `next` returns new state and weights,
      # which we assign back into the model variables.
      optimizer_state, updated_weights = optimizer.next(
          optimizer_state, model_weights.trainable, gradients)
      tf.nest.map_structure(lambda a, b: a.assign(b), model_weights.trainable,
                            updated_weights)
      # Fall back to the batch size of the predictions when the model does not
      # report `num_examples` itself.
      if output.num_examples is None:
        num_examples_sum += tf.shape(output.predictions, out_type=tf.int64)[0]
      else:
        num_examples_sum += tf.cast(output.num_examples, tf.int64)
      return num_examples_sum, optimizer_state

    def initial_state_for_reduce_fn():
      trainable_tensor_specs = tf.nest.map_structure(
          lambda v: tf.TensorSpec(v.shape, v.dtype), model_weights.trainable)
      return tf.zeros(
          shape=[],
          dtype=tf.int64), optimizer.initialize(trainable_tensor_specs)

    num_examples, _ = dataset_reduce_fn(
        reduce_fn, data, initial_state_fn=initial_state_for_reduce_fn)
    # The reported update is the *delta* from the initial weights.
    weights_delta = tf.nest.map_structure(tf.subtract,
                                          initial_weights.trainable,
                                          model_weights.trainable)
    model_output = model.report_local_outputs()
    stat_output = collections.OrderedDict(num_examples=num_examples)
    # TODO(b/122071074): Consider moving this functionality into
    # tff.federated_mean?
    weights_delta, has_non_finite_delta = (
        tensor_utils.zero_all_if_any_non_finite(weights_delta))
    # Zero out the weight if there are any non-finite values.
    if has_non_finite_delta > 0:
      client_weight = tf.constant(0.0)
    else:
      client_weight = tf.cast(num_examples, tf.float32)
    return client_works.ClientResult(
        update=weights_delta,
        update_weight=client_weight), model_output, stat_output

  return client_update
def build_proximal_client_update_with_tff_optimizer(
    model_fn,
    proximal_strength: float,
    use_experimental_simulation_loop: bool = False):
  """Creates client update logic in FedProx using a TFF optimizer.

  In contrast to using a `tf.keras.optimizers.Optimizer`, we avoid creating
  `tf.Variable`s associated with the optimizer state within the scope of the
  client work, as they are not necessary. This also means that the client's
  model weights are updated by computing `optimizer.next` and then assigning
  the result to the model weights (while a `tf.keras.optimizers.Optimizer`
  will modify the model weight in place using `optimizer.apply_gradients`).

  Args:
    model_fn: A no-arg callable returning a `tff.learning.Model`.
    proximal_strength: A nonnegative float representing the parameter of
      FedProx's regularization term. When set to `0`, the client update
      reduces to that of FedAvg. Higher values prevent clients from moving too
      far from the server model during local training.
    use_experimental_simulation_loop: Controls the reduce loop function for
      the input dataset. An experimental reduce loop is used for simulation.

  Returns:
    A `tf.function`.
  """
  model = model_fn()
  dataset_reduce_fn = dataset_reduce.build_dataset_reduce_fn(
      use_experimental_simulation_loop)

  @tf.function
  def client_update(optimizer, initial_weights, data):
    model_weights = model_utils.ModelWeights.from_model(model)
    # Initialize local training from the broadcast server weights.
    tf.nest.map_structure(lambda a, b: a.assign(b), model_weights,
                          initial_weights)

    def reduce_fn(state, batch):
      """Trains a `tff.learning.Model` on a batch of data."""
      num_examples_sum, optimizer_state = state
      with tf.GradientTape() as tape:
        output = model.forward_pass(batch, training=True)
      gradients = tape.gradient(output.loss, model_weights.trainable)
      # FedProx proximal term: add proximal_strength * (w - w_initial) to the
      # loss gradients, discouraging drift from the server model.
      proximal_delta = tf.nest.map_structure(tf.subtract,
                                             model_weights.trainable,
                                             initial_weights.trainable)
      proximal_term = tf.nest.map_structure(lambda x: proximal_strength * x,
                                            proximal_delta)
      gradients = tf.nest.map_structure(tf.add, gradients, proximal_term)
      # The TFF optimizer is functional: it returns new state and weights
      # rather than mutating variables, so assign the result back explicitly.
      optimizer_state, updated_weights = optimizer.next(optimizer_state,
                                                        model_weights.trainable,
                                                        gradients)
      tf.nest.map_structure(lambda a, b: a.assign(b), model_weights.trainable,
                            updated_weights)
      # When the model does not report `num_examples`, count via the leading
      # dimension of the predictions.
      if output.num_examples is None:
        num_examples_sum += tf.shape(output.predictions, out_type=tf.int64)[0]
      else:
        num_examples_sum += tf.cast(output.num_examples, tf.int64)
      return num_examples_sum, optimizer_state

    def initial_state_for_reduce_fn():
      # (example count, optimizer state) — the optimizer is initialized from
      # TensorSpecs of the trainable weights, not from variables.
      trainable_tensor_specs = tf.nest.map_structure(
          lambda v: tf.TensorSpec(v.shape, v.dtype), model_weights.trainable)
      return tf.zeros(
          shape=[],
          dtype=tf.int64), optimizer.initialize(trainable_tensor_specs)

    num_examples, _ = dataset_reduce_fn(
        reduce_fn, data, initial_state_fn=initial_state_for_reduce_fn)
    # The client reports the delta from the initial weights, not the weights.
    client_update = tf.nest.map_structure(tf.subtract,
                                          initial_weights.trainable,
                                          model_weights.trainable)
    model_output = model.report_local_outputs()
    stat_output = collections.OrderedDict(num_examples=num_examples)
    # TODO(b/122071074): Consider moving this functionality into
    # tff.federated_mean?
    client_update, has_non_finite_delta = (
        tensor_utils.zero_all_if_any_non_finite(client_update))
    # Zero out the weight if there are any non-finite values.
    if has_non_finite_delta > 0:
      client_weight = tf.constant(0.0)
    else:
      client_weight = tf.cast(num_examples, tf.float32)
    return client_works.ClientResult(
        update=client_update,
        update_weight=client_weight), model_output, stat_output

  return client_update
# (removed stray "Python" code-fence language marker left by extraction)
def build_proximal_client_update_with_keras_optimizer(
    model_fn,
    proximal_strength: float,
    use_experimental_simulation_loop: bool = False):
  """Creates client update logic in FedProx using a `tf.keras` optimizer.

  In contrast to using a `tff.learning.optimizers.Optimizer`, we have to
  maintain `tf.Variable`s associated with the optimizer state within the scope
  of the client work. Additionally, the client model weights are modified in
  place by using `optimizer.apply_gradients`).

  Args:
    model_fn: A no-arg callable returning a `tff.learning.Model`.
    proximal_strength: A nonnegative float representing the parameter of
      FedProx's regularization term. When set to `0`, the client update
      reduces to that of FedAvg. Higher values prevent clients from moving too
      far from the server model during local training.
    use_experimental_simulation_loop: Controls the reduce loop function for
      the input dataset. An experimental reduce loop is used for simulation.

  Returns:
    A `tf.function`.
  """
  model = model_fn()
  dataset_reduce_fn = dataset_reduce.build_dataset_reduce_fn(
      use_experimental_simulation_loop)

  @tf.function
  def client_update(optimizer, initial_weights, data):
    model_weights = model_utils.ModelWeights.from_model(model)
    # Begin local training from the broadcast (initial) weights.
    tf.nest.map_structure(lambda a, b: a.assign(b), model_weights,
                          initial_weights)

    def reduce_fn(num_examples_sum, batch):
      """Trains a `tff.learning.Model` on a batch of data."""
      with tf.GradientTape() as tape:
        output = model.forward_pass(batch, training=True)
      gradients = tape.gradient(output.loss, model_weights.trainable)
      # FedProx: penalize drift from the initial weights by adding
      # proximal_strength * (w - w_initial) to the loss gradients.
      drift = tf.nest.map_structure(tf.subtract, model_weights.trainable,
                                    initial_weights.trainable)
      proximal_term = tf.nest.map_structure(lambda x: proximal_strength * x,
                                            drift)
      gradients = tf.nest.map_structure(tf.add, gradients, proximal_term)
      # Keras optimizers mutate the variables in place.
      optimizer.apply_gradients(zip(gradients, model_weights.trainable))

      # TODO(b/199782787): Add a unit test for a model that does not compute
      # `num_examples` in its forward pass.
      if output.num_examples is None:
        num_examples_sum += tf.shape(output.predictions, out_type=tf.int64)[0]
      else:
        num_examples_sum += tf.cast(output.num_examples, tf.int64)
      return num_examples_sum

    def initial_state_for_reduce_fn():
      return tf.zeros(shape=[], dtype=tf.int64)

    num_examples = dataset_reduce_fn(
        reduce_fn, data, initial_state_fn=initial_state_for_reduce_fn)
    # The reported update is the *delta* from the initial weights.
    weights_delta = tf.nest.map_structure(tf.subtract,
                                          initial_weights.trainable,
                                          model_weights.trainable)
    model_output = model.report_local_outputs()
    stat_output = collections.OrderedDict(num_examples=num_examples)
    # TODO(b/122071074): Consider moving this functionality into
    # tff.federated_mean?
    weights_delta, has_non_finite_delta = (
        tensor_utils.zero_all_if_any_non_finite(weights_delta))
    # Zero out the weight if there are any non-finite values.
    if has_non_finite_delta > 0:
      client_weight = tf.constant(0.0)
    else:
      client_weight = tf.cast(num_examples, tf.float32)
    return client_works.ClientResult(
        update=weights_delta,
        update_weight=client_weight), model_output, stat_output

  return client_update
def build_proximal_client_update_with_keras_optimizer(
    model_fn,
    proximal_strength: float,
    use_experimental_simulation_loop: bool = False):
  """Creates client update logic in FedProx using a `tf.keras` optimizer.

  In contrast to using a `tff.learning.optimizers.Optimizer`, we have to
  maintain `tf.Variable`s associated with the optimizer state within the scope
  of the client work. Additionally, the client model weights are modified in
  place by using `optimizer.apply_gradients`).

  Args:
    model_fn: A no-arg callable returning a `tff.learning.Model`.
    proximal_strength: A nonnegative float representing the parameter of
      FedProx's regularization term. When set to `0`, the client update
      reduces to that of FedAvg. Higher values prevent clients from moving too
      far from the server model during local training.
    use_experimental_simulation_loop: Controls the reduce loop function for
      the input dataset. An experimental reduce loop is used for simulation.

  Returns:
    A `tf.function`.
  """
  model = model_fn()
  dataset_reduce_fn = dataset_reduce.build_dataset_reduce_fn(
      use_experimental_simulation_loop)

  @tf.function
  def client_update(optimizer, initial_weights, data):
    model_weights = model_utils.ModelWeights.from_model(model)
    # Initialize local training from the broadcast server weights.
    tf.nest.map_structure(lambda a, b: a.assign(b), model_weights,
                          initial_weights)

    def reduce_fn(num_examples_sum, batch):
      """Trains a `tff.learning.Model` on a batch of data."""
      with tf.GradientTape() as tape:
        output = model.forward_pass(batch, training=True)
      gradients = tape.gradient(output.loss, model_weights.trainable)
      # FedProx proximal term: add proximal_strength * (w - w_initial) to the
      # loss gradients, discouraging drift from the server model.
      proximal_delta = tf.nest.map_structure(tf.subtract,
                                             model_weights.trainable,
                                             initial_weights.trainable)
      proximal_term = tf.nest.map_structure(lambda x: proximal_strength * x,
                                            proximal_delta)
      gradients = tf.nest.map_structure(tf.add, gradients, proximal_term)
      # Keras optimizers update the variables in place.
      grads_and_vars = zip(gradients, model_weights.trainable)
      optimizer.apply_gradients(grads_and_vars)

      # TODO(b/199782787): Add a unit test for a model that does not compute
      # `num_examples` in its forward pass.
      if output.num_examples is None:
        num_examples_sum += tf.shape(output.predictions, out_type=tf.int64)[0]
      else:
        num_examples_sum += tf.cast(output.num_examples, tf.int64)
      return num_examples_sum

    def initial_state_for_reduce_fn():
      # Running example count, accumulated across batches.
      return tf.zeros(shape=[], dtype=tf.int64)

    num_examples = dataset_reduce_fn(
        reduce_fn, data, initial_state_fn=initial_state_for_reduce_fn)
    # The client reports the delta from the initial weights, not the weights.
    client_update = tf.nest.map_structure(tf.subtract,
                                          initial_weights.trainable,
                                          model_weights.trainable)
    model_output = model.report_local_outputs()
    stat_output = collections.OrderedDict(num_examples=num_examples)
    # TODO(b/122071074): Consider moving this functionality into
    # tff.federated_mean?
    client_update, has_non_finite_delta = (
        tensor_utils.zero_all_if_any_non_finite(client_update))
    # Zero out the weight if there are any non-finite values.
    if has_non_finite_delta > 0:
      client_weight = tf.constant(0.0)
    else:
      client_weight = tf.cast(num_examples, tf.float32)
    return client_works.ClientResult(
        update=client_update,
        update_weight=client_weight), model_output, stat_output

  return client_update
# (removed stray "Python" code-fence language marker left by extraction)
def build_fed_prox_client_work(
    model_fn: Callable[[], model_lib.Model], proximal_strength: float,
    optimizer_fn: Union[optimizer_base.Optimizer,
                        Callable[[], tf.keras.optimizers.Optimizer]],
    use_experimental_simulation_loop: bool = False
) -> client_works.ClientWorkProcess:
  """Creates a `ClientWorkProcess` for federated averaging.

  This client work is constructed in slightly different manners depending on
  whether `optimizer_fn` is a `tff.learning.optimizers.Optimizer`, or a no-arg
  callable returning a `tf.keras.optimizers.Optimizer`.

  If it is a `tff.learning.optimizers.Optimizer`, we avoid creating
  `tf.Variable`s associated with the optimizer state within the scope of the
  client work, as they are not necessary. This also means that the client's
  model weights are updated by computing `optimizer.next` and then assigning
  the result to the model weights (while a `tf.keras.optimizers.Optimizer`
  will modify the model weight in place using `optimizer.apply_gradients`).

  Args:
    model_fn: A no-arg function that returns a `tff.learning.Model`. This
      method must *not* capture TensorFlow tensors or variables and use them.
      The model must be constructed entirely from scratch on each invocation,
      returning the same pre-constructed model each call will result in an
      error.
    proximal_strength: A nonnegative float representing the parameter of
      FedProx's regularization term. When set to `0`, the algorithm reduces to
      FedAvg. Higher values prevent clients from moving too far from the
      server model during local training.
    optimizer_fn: A `tff.learning.optimizers.Optimizer`, or a no-arg callable
      that returns a `tf.keras.Optimizer`.
    use_experimental_simulation_loop: Controls the reduce loop function for
      input dataset. An experimental reduce loop is used for simulation. It is
      currently necessary to set this flag to True for performant GPU
      simulations.

  Returns:
    A `ClientWorkProcess`.
  """
  with tf.Graph().as_default():
    # Wrap model construction in a graph to avoid polluting the global context
    # with variables created for this model.
    model = model_fn()
  dataset_type = computation_types.SequenceType(model.input_spec)
  weights_type = model_utils.weights_type_from_model(model)

  # The two branches differ only in how the optimizer is obtained and passed
  # through to the client-update `tf.function`.
  if isinstance(optimizer_fn, optimizer_base.Optimizer):

    @computations.tf_computation(weights_type, dataset_type)
    def client_update_computation(initial_model_weights, dataset):
      client_update = build_proximal_client_update_with_tff_optimizer(
          model_fn, proximal_strength, use_experimental_simulation_loop)
      return client_update(optimizer_fn, initial_model_weights, dataset)

  else:

    @computations.tf_computation(weights_type, dataset_type)
    def client_update_computation(initial_model_weights, dataset):
      keras_optimizer = optimizer_fn()
      client_update = build_proximal_client_update_with_keras_optimizer(
          model_fn, proximal_strength, use_experimental_simulation_loop)
      return client_update(keras_optimizer, initial_model_weights, dataset)

  @computations.federated_computation
  def init_fn():
    # This client work keeps no state across rounds.
    return intrinsics.federated_value((), placements.SERVER)

  @computations.federated_computation(
      init_fn.type_signature.result,
      computation_types.at_clients(weights_type),
      computation_types.at_clients(dataset_type))
  def next_fn(state, weights, client_data):
    client_result, model_outputs, stat_output = intrinsics.federated_map(
        client_update_computation, (weights, client_data))
    train_metrics = model.federated_output_computation(model_outputs)
    stat_metrics = intrinsics.federated_sum(stat_output)
    measurements = intrinsics.federated_zip(
        collections.OrderedDict(train=train_metrics, stat=stat_metrics))
    return measured_process.MeasuredProcessOutput(state, client_result,
                                                  measurements)

  return client_works.ClientWorkProcess(init_fn, next_fn)
def build_fed_prox_client_work(
    model_fn: Callable[[], model_lib.Model], proximal_strength: float,
    optimizer_fn: Union[optimizer_base.Optimizer,
                        Callable[[], tf.keras.optimizers.Optimizer]],
    use_experimental_simulation_loop: bool = False
) -> client_works.ClientWorkProcess:
  """Creates a `ClientWorkProcess` for federated averaging.

  This client work is constructed in slightly different manners depending on
  whether `optimizer_fn` is a `tff.learning.optimizers.Optimizer`, or a no-arg
  callable returning a `tf.keras.optimizers.Optimizer`.

  If it is a `tff.learning.optimizers.Optimizer`, we avoid creating
  `tf.Variable`s associated with the optimizer state within the scope of the
  client work, as they are not necessary. This also means that the client's
  model weights are updated by computing `optimizer.next` and then assigning
  the result to the model weights (while a `tf.keras.optimizers.Optimizer`
  will modify the model weight in place using `optimizer.apply_gradients`).

  Args:
    model_fn: A no-arg function that returns a `tff.learning.Model`. This
      method must *not* capture TensorFlow tensors or variables and use them.
      The model must be constructed entirely from scratch on each invocation,
      returning the same pre-constructed model each call will result in an
      error.
    proximal_strength: A nonnegative float representing the parameter of
      FedProx's regularization term. When set to `0`, the algorithm reduces to
      FedAvg. Higher values prevent clients from moving too far from the
      server model during local training.
    optimizer_fn: A `tff.learning.optimizers.Optimizer`, or a no-arg callable
      that returns a `tf.keras.Optimizer`.
    use_experimental_simulation_loop: Controls the reduce loop function for
      input dataset. An experimental reduce loop is used for simulation. It is
      currently necessary to set this flag to True for performant GPU
      simulations.

  Returns:
    A `ClientWorkProcess`.
  """
  with tf.Graph().as_default():
    # Wrap model construction in a graph to avoid polluting the global context
    # with variables created for this model.
    model = model_fn()
  data_type = computation_types.SequenceType(model.input_spec)
  weights_type = model_utils.weights_type_from_model(model)

  # A TFF optimizer is passed directly into the client update; a Keras
  # optimizer must be constructed inside the tf_computation instead.
  if isinstance(optimizer_fn, optimizer_base.Optimizer):

    @computations.tf_computation(weights_type, data_type)
    def client_update_computation(initial_model_weights, dataset):
      client_update = build_proximal_client_update_with_tff_optimizer(
          model_fn, proximal_strength, use_experimental_simulation_loop)
      return client_update(optimizer_fn, initial_model_weights, dataset)

  else:

    @computations.tf_computation(weights_type, data_type)
    def client_update_computation(initial_model_weights, dataset):
      optimizer = optimizer_fn()
      client_update = build_proximal_client_update_with_keras_optimizer(
          model_fn, proximal_strength, use_experimental_simulation_loop)
      return client_update(optimizer, initial_model_weights, dataset)

  @computations.federated_computation
  def init_fn():
    # Stateless client work: the server state is an empty tuple.
    return intrinsics.federated_value((), placements.SERVER)

  @computations.federated_computation(
      init_fn.type_signature.result,
      computation_types.at_clients(weights_type),
      computation_types.at_clients(data_type))
  def next_fn(state, weights, client_data):
    client_result, model_outputs, stat_output = intrinsics.federated_map(
        client_update_computation, (weights, client_data))
    train_metrics = model.federated_output_computation(model_outputs)
    stat_metrics = intrinsics.federated_sum(stat_output)
    measurements = intrinsics.federated_zip(
        collections.OrderedDict(train=train_metrics, stat=stat_metrics))
    return measured_process.MeasuredProcessOutput(state, client_result,
                                                  measurements)

  return client_works.ClientWorkProcess(init_fn, next_fn)
# (removed stray "Python" code-fence language marker left by extraction)
def build_example_weighted_fed_prox_process(
    model_fn: Callable[[], model_lib.Model],
    proximal_strength: float,
    client_optimizer_fn: Union[optimizer_base.Optimizer,
                               Callable[[], tf.keras.optimizers.Optimizer]],
    server_optimizer_fn: Union[optimizer_base.Optimizer, Callable[
        [], tf.keras.optimizers.Optimizer]] = DEFAULT_SERVER_OPTIMIZER_FN,
    distributor: Optional[distributors.DistributionProcess] = None,
    model_update_aggregation_factory: Optional[
        factory.WeightedAggregationFactory] = None,
    use_experimental_simulation_loop: bool = False
) -> learning_process.LearningProcess:
  """Builds a learning process that performs the FedProx algorithm.

  This function creates a `LearningProcess` that performs example-weighted
  FedProx on client models. This algorithm behaves the same as federated
  averaging, except that it uses a proximal regularization term that encourages
  clients to not drift too far from the server model.

  The iterative process has the following methods inherited from
  `tff.learning.templates.LearningProcess`:

  *   `initialize`: A `tff.Computation` with the functional type signature
      `( -> S@SERVER)`, where `S` is a `LearningAlgorithmState` representing
      the initial state of the server.
  *   `next`: A `tff.Computation` with the functional type signature
      `(<S@SERVER, {B*}@CLIENTS> -> <L@SERVER>)` where `S` is a
      `LearningAlgorithmState` whose type matches the output of `initialize`
      and `{B*}@CLIENTS` represents the client datasets. The output `L`
      contains the updated server state, as well as metrics that are the
      result of `tff.learning.Model.federated_output_computation` during
      client training, and any other metrics from broadcast and aggregation
      processes.
  *   `report`: A `tff.Computation` with type signature `( -> M@SERVER)`,
      where `M` represents the type of the model weights used during training.

  Each time the `next` method is called, the server model is broadcast to each
  client using a broadcast function. For each client, local training is
  performed using `client_optimizer_fn`. Each client computes the difference
  between the client model after training and the initial broadcast model.
  These model deltas are then aggregated at the server using a weighted
  aggregation function. Clients weighted by the number of examples they see
  throughout local training. The aggregate model delta is applied at the
  server using a server optimizer, as in the FedOpt framework proposed in
  [Reddi et al., 2021](https://arxiv.org/abs/2003.00295).

  Note: The default server optimizer function is `tf.keras.optimizers.SGD`
  with a learning rate of 1.0, which corresponds to adding the model delta to
  the current server model. This recovers the original FedProx algorithm in
  [Li et al., 2020](https://arxiv.org/abs/1812.06127). More sophisticated
  federated averaging procedures may use different learning rates or server
  optimizers.

  Args:
    model_fn: A no-arg function that returns a `tff.learning.Model`. This
      method must *not* capture TensorFlow tensors or variables and use them.
      The model must be constructed entirely from scratch on each invocation,
      returning the same pre-constructed model each call will result in an
      error.
    proximal_strength: A nonnegative float representing the parameter of
      FedProx's regularization term. When set to `0`, the algorithm reduces to
      FedAvg. Higher values prevent clients from moving too far from the
      server model during local training.
    client_optimizer_fn: A `tff.learning.optimizers.Optimizer`, or a no-arg
      callable that returns a `tf.keras.Optimizer`.
    server_optimizer_fn: A `tff.learning.optimizers.Optimizer`, or a no-arg
      callable that returns a `tf.keras.Optimizer`. By default, this uses
      `tf.keras.optimizers.SGD` with a learning rate of 1.0.
    distributor: An optional `DistributionProcess` that broadcasts the model
      weights on the server to the clients. If set to `None`, the distributor
      is constructed via `distributors.build_broadcast_process`.
    model_update_aggregation_factory: An optional
      `tff.aggregators.WeightedAggregationFactory` used to aggregate client
      updates on the server. If `None`, this is set to
      `tff.aggregators.MeanFactory`.
    use_experimental_simulation_loop: Controls the reduce loop function for
      input dataset. An experimental reduce loop is used for simulation. It is
      currently necessary to set this flag to True for performant GPU
      simulations.

  Returns:
    A `LearningProcess`.

  Raises:
    ValueError: If `proximal_parameter` is not a nonnegative float.
  """
  if not isinstance(proximal_strength, float) or proximal_strength < 0.0:
    raise ValueError(
        'proximal_strength must be a nonnegative float, found {}'.format(
            proximal_strength))
  py_typecheck.check_callable(model_fn)

  @computations.tf_computation()
  def initial_model_weights_fn():
    return model_utils.ModelWeights.from_model(model_fn())

  model_weights_type = initial_model_weights_fn.type_signature.result

  # Fill in the default building blocks where the caller gave none.
  if distributor is None:
    distributor = distributors.build_broadcast_process(model_weights_type)
  if model_update_aggregation_factory is None:
    model_update_aggregation_factory = mean.MeanFactory()
  py_typecheck.check_type(model_update_aggregation_factory,
                          factory.WeightedAggregationFactory)

  aggregator = model_update_aggregation_factory.create(
      model_weights_type.trainable, computation_types.TensorType(tf.float32))
  # The aggregator must be structure-preserving: the server result must have
  # the same member type as the client input.
  agg_signature = aggregator.next.type_signature
  client_value_type = agg_signature.parameter[1]
  server_value_type = agg_signature.result[1]
  if client_value_type.member != server_value_type.member:
    raise TypeError('`model_update_aggregation_factory` does not produce a '
                    'compatible `AggregationProcess`. The processes must '
                    'retain the type structure of the inputs on the '
                    f'server, but got {client_value_type.member} != '
                    f'{server_value_type.member}.')

  client_work = build_fed_prox_client_work(model_fn, proximal_strength,
                                           client_optimizer_fn,
                                           use_experimental_simulation_loop)
  finalizer = finalizers.build_apply_optimizer_finalizer(
      server_optimizer_fn, model_weights_type)
  return composers.compose_learning_process(initial_model_weights_fn,
                                            distributor, client_work,
                                            aggregator, finalizer)
def build_example_weighted_fed_prox_process(
    model_fn: Callable[[], model_lib.Model],
    proximal_strength: float,
    client_optimizer_fn: Union[optimizer_base.Optimizer,
                               Callable[[], tf.keras.optimizers.Optimizer]],
    server_optimizer_fn: Union[optimizer_base.Optimizer, Callable[
        [], tf.keras.optimizers.Optimizer]] = DEFAULT_SERVER_OPTIMIZER_FN,
    distributor: Optional[distributors.DistributionProcess] = None,
    model_update_aggregation_factory: Optional[
        factory.WeightedAggregationFactory] = None,
    use_experimental_simulation_loop: bool = False
) -> learning_process.LearningProcess:
  """Builds a learning process that performs the FedProx algorithm.

  This function creates a `LearningProcess` that performs example-weighted
  FedProx on client models. This algorithm behaves the same as federated
  averaging, except that it uses a proximal regularization term that encourages
  clients to not drift too far from the server model.

  The iterative process has the following methods inherited from
  `tff.learning.templates.LearningProcess`:

  *   `initialize`: A `tff.Computation` with the functional type signature
      `( -> S@SERVER)`, where `S` is a `LearningAlgorithmState` representing the
      initial state of the server.
  *   `next`: A `tff.Computation` with the functional type signature
      `(<S@SERVER, {B*}@CLIENTS> -> <L@SERVER>)` where `S` is a
      `LearningAlgorithmState` whose type matches the output of `initialize`
      and `{B*}@CLIENTS` represents the client datasets. The output `L`
      contains the updated server state, as well as metrics that are the result
      of `tff.learning.Model.federated_output_computation` during client
      training, and any other metrics from broadcast and aggregation processes.
  *   `report`: A `tff.Computation` with type signature `( -> M@SERVER)`, where
      `M` represents the type of the model weights used during training.

  Each time the `next` method is called, the server model is broadcast to each
  client using a broadcast function. For each client, local training is
  performed using `client_optimizer_fn`. Each client computes the difference
  between the client model after training and the initial broadcast model.
  These model deltas are then aggregated at the server using a weighted
  aggregation function. Clients are weighted by the number of examples they see
  throughout local training. The aggregate model delta is applied at the server
  using a server optimizer, as in the FedOpt framework proposed in
  [Reddi et al., 2021](https://arxiv.org/abs/2003.00295).

  Note: The default server optimizer function is `tf.keras.optimizers.SGD`
  with a learning rate of 1.0, which corresponds to adding the model delta to
  the current server model. This recovers the original FedProx algorithm in
  [Li et al., 2020](https://arxiv.org/abs/1812.06127). More sophisticated
  federated averaging procedures may use different learning rates or server
  optimizers.

  Args:
    model_fn: A no-arg function that returns a `tff.learning.Model`. This
      method must *not* capture TensorFlow tensors or variables and use them.
      The model must be constructed entirely from scratch on each invocation;
      returning the same pre-constructed model each call will result in an
      error.
    proximal_strength: A nonnegative float representing the parameter of
      FedProx's regularization term. When set to `0`, the algorithm reduces to
      FedAvg. Higher values prevent clients from moving too far from the server
      model during local training.
    client_optimizer_fn: A `tff.learning.optimizers.Optimizer`, or a no-arg
      callable that returns a `tf.keras.Optimizer`.
    server_optimizer_fn: A `tff.learning.optimizers.Optimizer`, or a no-arg
      callable that returns a `tf.keras.Optimizer`. By default, this uses
      `tf.keras.optimizers.SGD` with a learning rate of 1.0.
    distributor: An optional `DistributionProcess` that broadcasts the model
      weights on the server to the clients. If set to `None`, the distributor
      is constructed via `distributors.build_broadcast_process`.
    model_update_aggregation_factory: An optional
      `tff.aggregators.WeightedAggregationFactory` used to aggregate client
      updates on the server. If `None`, this is set to
      `tff.aggregators.MeanFactory`.
    use_experimental_simulation_loop: Controls the reduce loop function for
      input dataset. An experimental reduce loop is used for simulation. It is
      currently necessary to set this flag to True for performant GPU
      simulations.

  Returns:
    A `LearningProcess`.

  Raises:
    ValueError: If `proximal_strength` is not a nonnegative float.
  """
  # Validate the proximal term up front so a bad value fails fast, before any
  # expensive TFF computation tracing happens.
  if not isinstance(proximal_strength, float) or proximal_strength < 0.0:
    raise ValueError(
        'proximal_strength must be a nonnegative float, found {}'.format(
            proximal_strength))

  py_typecheck.check_callable(model_fn)

  @computations.tf_computation()
  def initial_model_weights_fn():
    # The model is built fresh inside the tf_computation so no Python-level
    # TensorFlow state is captured across invocations (see `model_fn` docs).
    return model_utils.ModelWeights.from_model(model_fn())

  model_weights_type = initial_model_weights_fn.type_signature.result

  if distributor is None:
    distributor = distributors.build_broadcast_process(model_weights_type)

  if model_update_aggregation_factory is None:
    model_update_aggregation_factory = mean.MeanFactory()
  py_typecheck.check_type(model_update_aggregation_factory,
                          factory.WeightedAggregationFactory)
  # Client updates are deltas over the trainable weights, weighted by a
  # float32 scalar (the example count).
  aggregator = model_update_aggregation_factory.create(
      model_weights_type.trainable, computation_types.TensorType(tf.float32))
  process_signature = aggregator.next.type_signature
  input_client_value_type = process_signature.parameter[1]
  result_server_value_type = process_signature.result[1]
  # The aggregator must return a server value whose structure matches the
  # client input structure, otherwise the finalizer cannot apply the delta.
  if input_client_value_type.member != result_server_value_type.member:
    raise TypeError('`model_update_aggregation_factory` does not produce a '
                    'compatible `AggregationProcess`. The processes must '
                    'retain the type structure of the inputs on the '
                    f'server, but got {input_client_value_type.member} != '
                    f'{result_server_value_type.member}.')

  client_work = build_fed_prox_client_work(model_fn, proximal_strength,
                                           client_optimizer_fn,
                                           use_experimental_simulation_loop)
  finalizer = finalizers.build_apply_optimizer_finalizer(
      server_optimizer_fn, model_weights_type)
  return composers.compose_learning_process(initial_model_weights_fn,
                                            distributor, client_work,
                                            aggregator, finalizer)
Python
def _is_non_shopping_product(self, product_field: str) -> bool: """Determines if a product's field contains text indicating it is not intended for Shopping. Args: product_field: A field of a product to check for existence Shopping exclusion terms. Returns: True if the given field was detected to have Shopping exclusion terms, otherwise false. """ if any(shopping_exclusion_pattern in product_field for shopping_exclusion_pattern in self.shopping_removal_patterns_exact_match): return True else: return False
def _is_non_shopping_product(self, product_field: str) -> bool: """Determines if a product's field contains text indicating it is not intended for Shopping. Args: product_field: A field of a product to check for existence Shopping exclusion terms. Returns: True if the given field was detected to have Shopping exclusion terms, otherwise false. """ if any(shopping_exclusion_pattern in product_field for shopping_exclusion_pattern in self.shopping_removal_patterns_exact_match): return True else: return False
Python
def _optimize(self, product_batch: Dict[str, Any], language: str,
              country: str, currency: str) -> int:
  """Runs the title word-order optimization.

  This is called by process() in the base class. For every product in the
  batch that has a title and a supported Google Product Category (GPC), the
  highest-weighted keywords for that category are moved to the front of the
  title.

  Args:
    product_batch: A batch of product data.
    language: The language to use for this optimizer.
    country: The country to use for this optimizer.
    currency: The currency to use for this optimizer.

  Returns:
    The number of products affected by this optimization.
  """
  # Per-language config tables loaded at app startup; missing configs fall
  # back to empty mappings so the loop below becomes a no-op.
  gpc_string_to_id_mapping = current_app.config.get('CONFIGS', {}).get(
      _GCP_STRING_TO_ID_MAPPING_CONFIG_FILE_NAME.format(language), {})
  title_word_order_config = current_app.config.get('CONFIGS', {}).get(
      _TITLE_WORD_ORDER_CONFIG_FILE_NAME.format(language), {})
  blocklist_config = current_app.config.get('CONFIGS', {}).get(
      _TITLE_WORD_ORDER_BLOCKLIST_FILE_NAME.format(language), {})
  # Blocklist matching is case-insensitive, so normalize once up front.
  keyword_blocklist = [keyword.lower() for keyword in blocklist_config]
  self._title_word_order_options = current_app.config.get(
      'CONFIGS', {}).get(_TITLE_WORD_ORDER_OPTIONS_FILE_NAME)
  optimization_includes_description = (
      self._optimization_includes_description())
  optimization_includes_product_types = (
      self._optimization_includes_product_types())
  promo_text_remover = promo_text_remover_lib.PromoTextRemover(
      language=language)
  optimization_level = self._get_optimization_level()
  num_of_products_optimized = 0

  for entry in product_batch['entries']:
    product = entry['product']
    original_title = product.get('title', None)
    description = product.get('description', None)
    product_types = product.get('productTypes', [])
    gpc = product.get('googleProductCategory', '')

    # Nothing to optimize without a title.
    if not original_title:
      continue

    # Get the string version of the GPC if it was provided as a number ID.
    if isinstance(gpc, int) or gpc.isdigit():
      gpc_string = get_gpc_as_string(gpc, gpc_string_to_id_mapping)
      if not gpc_string:
        continue
    else:
      gpc_string = gpc

    if _should_skip_optimization(gpc_string, optimization_level):
      continue

    # Keyword weights are keyed by the level-3 GPC id; skip categories that
    # cannot be resolved to one.
    gpc_id = _get_level_3_gpc_id(gpc_string, gpc_string_to_id_mapping)
    if not gpc_id:
      continue

    # Filter out blocklisted and promo-text keywords, then rank what remains
    # by descending performance weight.
    keywords_for_gpc = title_word_order_config.get(str(gpc_id), [])
    allowed_keywords_for_gpc = _remove_keywords_in_blocklist(
        keywords_for_gpc, keyword_blocklist)
    allowed_keywords_for_gpc = _remove_keywords_with_promo(
        promo_text_remover, allowed_keywords_for_gpc)
    sorted_keywords_for_gpc = _sort_keywords_for_gpc_by_descending_weight(
        allowed_keywords_for_gpc)

    title_to_process = original_title
    title_words = _tokenize_text(title_to_process, language)
    # Description / productTypes tokens are only consulted when the
    # corresponding option is enabled.
    description_words = _tokenize_text(
        description, language) if optimization_includes_description else []
    joined_product_types = ' '.join(product_types)
    product_types_words = _tokenize_text(
        joined_product_types,
        language) if optimization_includes_product_types else []

    (keywords_visible_to_user, keywords_not_visible_to_user,
     title_without_keywords) = (
         _generate_front_and_back_keyword_lists(sorted_keywords_for_gpc,
                                                title_to_process, title_words,
                                                description_words,
                                                product_types_words, language))

    keywords_to_prepend = _generate_list_of_keywords_to_prepend(
        keywords_visible_to_user, keywords_not_visible_to_user,
        title_to_process, language)
    ordered_keywords_to_prepend = _reorder_keywords_by_weight(
        keywords_to_prepend, sorted_keywords_for_gpc)
    optimized_title = _generate_prepended_title(ordered_keywords_to_prepend,
                                                title_to_process)

    # If prepending pushed the title over the length limit, rebuild it from
    # the keyword-stripped title so the prepended keywords still fit.
    if len(optimized_title) > _MAX_TITLE_LENGTH:
      optimized_title = _generate_prepended_title(ordered_keywords_to_prepend,
                                                  title_without_keywords)

    product['title'] = optimized_title

    # Only count (and track) products whose title actually changed.
    if product.get('title', '') != original_title:
      logging.info(
          'Modified item %s: Moved high-performing keywords to front of title: %s',
          product['offerId'], product['title'])
      num_of_products_optimized += 1
      base_optimizer.set_optimization_tracking(product,
                                               base_optimizer.OPTIMIZED)

  return num_of_products_optimized
def _optimize(self, product_batch: Dict[str, Any], language: str,
              country: str, currency: str) -> int:
  """Runs the title word-order optimization.

  This is called by process() in the base class. For every product in the
  batch that has a title and a supported Google Product Category (GPC), the
  highest-weighted keywords for that category are moved to the front of the
  title.

  Args:
    product_batch: A batch of product data.
    language: The language to use for this optimizer.
    country: The country to use for this optimizer.
    currency: The currency to use for this optimizer.

  Returns:
    The number of products affected by this optimization.
  """
  # Per-language config tables loaded at app startup; missing configs fall
  # back to empty mappings so the loop below becomes a no-op.
  gpc_string_to_id_mapping = current_app.config.get('CONFIGS', {}).get(
      _GCP_STRING_TO_ID_MAPPING_CONFIG_FILE_NAME.format(language), {})
  title_word_order_config = current_app.config.get('CONFIGS', {}).get(
      _TITLE_WORD_ORDER_CONFIG_FILE_NAME.format(language), {})
  blocklist_config = current_app.config.get('CONFIGS', {}).get(
      _TITLE_WORD_ORDER_BLOCKLIST_FILE_NAME.format(language), {})
  # Blocklist matching is case-insensitive, so normalize once up front.
  keyword_blocklist = [keyword.lower() for keyword in blocklist_config]
  self._title_word_order_options = current_app.config.get(
      'CONFIGS', {}).get(_TITLE_WORD_ORDER_OPTIONS_FILE_NAME)
  optimization_includes_description = (
      self._optimization_includes_description())
  optimization_includes_product_types = (
      self._optimization_includes_product_types())
  promo_text_remover = promo_text_remover_lib.PromoTextRemover(
      language=language)
  optimization_level = self._get_optimization_level()
  num_of_products_optimized = 0

  for entry in product_batch['entries']:
    product = entry['product']
    original_title = product.get('title', None)
    description = product.get('description', None)
    product_types = product.get('productTypes', [])
    gpc = product.get('googleProductCategory', '')

    # Nothing to optimize without a title.
    if not original_title:
      continue

    # Get the string version of the GPC if it was provided as a number ID.
    if isinstance(gpc, int) or gpc.isdigit():
      gpc_string = get_gpc_as_string(gpc, gpc_string_to_id_mapping)
      if not gpc_string:
        continue
    else:
      gpc_string = gpc

    if _should_skip_optimization(gpc_string, optimization_level):
      continue

    # Keyword weights are keyed by the level-3 GPC id; skip categories that
    # cannot be resolved to one.
    gpc_id = _get_level_3_gpc_id(gpc_string, gpc_string_to_id_mapping)
    if not gpc_id:
      continue

    # Filter out blocklisted and promo-text keywords, then rank what remains
    # by descending performance weight.
    keywords_for_gpc = title_word_order_config.get(str(gpc_id), [])
    allowed_keywords_for_gpc = _remove_keywords_in_blocklist(
        keywords_for_gpc, keyword_blocklist)
    allowed_keywords_for_gpc = _remove_keywords_with_promo(
        promo_text_remover, allowed_keywords_for_gpc)
    sorted_keywords_for_gpc = _sort_keywords_for_gpc_by_descending_weight(
        allowed_keywords_for_gpc)

    title_to_process = original_title
    title_words = _tokenize_text(title_to_process, language)
    # Description / productTypes tokens are only consulted when the
    # corresponding option is enabled.
    description_words = _tokenize_text(
        description, language) if optimization_includes_description else []
    joined_product_types = ' '.join(product_types)
    product_types_words = _tokenize_text(
        joined_product_types,
        language) if optimization_includes_product_types else []

    (keywords_visible_to_user, keywords_not_visible_to_user,
     title_without_keywords) = (
         _generate_front_and_back_keyword_lists(sorted_keywords_for_gpc,
                                                title_to_process, title_words,
                                                description_words,
                                                product_types_words, language))

    keywords_to_prepend = _generate_list_of_keywords_to_prepend(
        keywords_visible_to_user, keywords_not_visible_to_user,
        title_to_process, language)
    ordered_keywords_to_prepend = _reorder_keywords_by_weight(
        keywords_to_prepend, sorted_keywords_for_gpc)
    optimized_title = _generate_prepended_title(ordered_keywords_to_prepend,
                                                title_to_process)

    # If prepending pushed the title over the length limit, rebuild it from
    # the keyword-stripped title so the prepended keywords still fit.
    if len(optimized_title) > _MAX_TITLE_LENGTH:
      optimized_title = _generate_prepended_title(ordered_keywords_to_prepend,
                                                  title_without_keywords)

    product['title'] = optimized_title

    # Only count (and track) products whose title actually changed.
    if product.get('title', '') != original_title:
      logging.info(
          'Modified item %s: Moved high-performing keywords to front of title: %s',
          product['offerId'], product['title'])
      num_of_products_optimized += 1
      base_optimizer.set_optimization_tracking(product,
                                               base_optimizer.OPTIMIZED)

  return num_of_products_optimized
Python
def _tokenize_text(text: str, language: str) -> List[str]:
  """Breaks text into individual words, dispatching on the language.

  Japanese text has no whitespace word boundaries, so it is segmented with
  MeCab; text in any other language is split on whitespace.

  Args:
    text: Text to be split.
    language: The configured language code.

  Returns:
    The text tokenized into a list of words.
  """
  is_japanese = language == constants.LANGUAGE_CODE_JA
  return _split_words_in_japanese(text) if is_japanese else text.split()
def _tokenize_text(text: str, language: str) -> List[str]:
  """Breaks text into individual words, dispatching on the language.

  Japanese text has no whitespace word boundaries, so it is segmented with
  MeCab; text in any other language is split on whitespace.

  Args:
    text: Text to be split.
    language: The configured language code.

  Returns:
    The text tokenized into a list of words.
  """
  is_japanese = language == constants.LANGUAGE_CODE_JA
  return _split_words_in_japanese(text) if is_japanese else text.split()
Python
def _split_words_in_japanese(text: str) -> List[str]:
  """Segments Japanese text into words using the app-configured MeCab tagger.

  Args:
    text: Text to be split.

  Returns:
    The text tokenized into a list of semantically delineated words, or an
    empty list when no MeCab tagger is configured.
  """
  tagger = current_app.config.get('MECAB')
  if not tagger:
    logging.warning('Did not parse title because MeCab was not set up.')
    return []
  # Walk the MeCab node chain, collecting the surface form of each node.
  # NOTE(review): the chain includes BOS/EOS sentinel nodes whose surface is
  # empty — presumably harmless downstream; confirm against callers.
  words = []
  current_node = tagger.parseToNode(text)
  while current_node:
    words.append(current_node.surface)
    current_node = current_node.next
  return words
def _split_words_in_japanese(text: str) -> List[str]:
  """Segments Japanese text into words using the app-configured MeCab tagger.

  Args:
    text: Text to be split.

  Returns:
    The text tokenized into a list of semantically delineated words, or an
    empty list when no MeCab tagger is configured.
  """
  tagger = current_app.config.get('MECAB')
  if not tagger:
    logging.warning('Did not parse title because MeCab was not set up.')
    return []
  # Walk the MeCab node chain, collecting the surface form of each node.
  # NOTE(review): the chain includes BOS/EOS sentinel nodes whose surface is
  # empty — presumably harmless downstream; confirm against callers.
  words = []
  current_node = tagger.parseToNode(text)
  while current_node:
    words.append(current_node.surface)
    current_node = current_node.next
  return words