Dataset columns:
language — stringclasses (6 values)
original_string — stringlengths (25 to 887k)
text — stringlengths (25 to 887k)
Python
def f_hist_trades(param_ct, param_ini, param_end):
    """
    Get the historical executed trades in the account associated with the
    initialized MetaTrader5 client.

    Params
    ------
    param_ct: MetaTrader5 initialized client object
        An already successfully initialized connection object to the
        MetaTrader5 Desktop App
    param_ini: datetime
        Initial date to draw the historical trades
        param_ini = datetime(2021, 2, 1)
    param_end: datetime
        Final date to draw the historical trades
        param_end = datetime(2021, 3, 1)

    Returns
    -------
    df_hist_trades: pd.DataFrame

    References
    ----------
    https://www.mql5.com/en/docs/integration/python_metatrader5/mt5historydealsget_py
    https://www.mql5.com/en/docs/constants/tradingconstants/dealproperties
    https://www.mql5.com/en/docs/integration/python_metatrader5/mt5historyordersget_py
    https://www.mql5.com/en/docs/constants/tradingconstants/orderproperties#enum_order_property_integer
    """

    # get historical info of deals in the account
    history_deals = param_ct.history_deals_get(param_ini, param_end)
    # get historical info of orders in the account
    history_orders = param_ct.history_orders_get(param_ini, param_end)

    # check for returned results
    if (len(history_orders) > 0) and (len(history_deals) > 0):
        print(" ++++ Historical orders retrieved: OK")
        print(" ++++ Historical deals retrieved: OK")
    else:
        print("No orders and/or deals returned")

    # historical deals of the account
    df_deals = pd.DataFrame(list(history_deals),
                            columns=history_deals[0]._asdict().keys())
    # historical orders of the account
    df_orders = pd.DataFrame(list(history_orders),
                             columns=history_orders[0]._asdict().keys())

    # useful columns from orders
    df_hist_trades = df_orders[['time_setup', 'symbol', 'position_id', 'type',
                                'volume_current', 'price_open', 'sl', 'tp']]
    # useful columns from deals
    df_deals_hist = df_deals[['position_id', 'type', 'price', 'volume']]

    # rename columns
    df_hist_trades.columns = ['OpenTime', 'Symbol', 'Ticket', 'Type',
                              'Volume', 'OpenPrice', 'S/L', 'T/P']
    df_deals_hist.columns = ['Ticket', 'Type', 'Price', 'Volume']

    # choose only buy or sell transactions (ignore all the rest, like balance ...)
    df_hist_trades = df_hist_trades[(df_hist_trades['Type'] == 0) |
                                    (df_hist_trades['Type'] == 1)]
    df_deals_hist = df_deals_hist[(df_deals_hist['Type'] == 0) |
                                  (df_deals_hist['Type'] == 1)]
    df_hist_trades['OpenTime'] = pd.to_datetime(df_hist_trades['OpenTime'], unit='s')

    # unique values for position_id
    uni_id = df_hist_trades['Ticket'].unique()

    # first and last index for every unique value of position_id
    ind_profloss = [df_hist_trades.index[df_hist_trades['Ticket'] == i][0] for i in uni_id]
    ind_open = [df_deals_hist.index[df_deals_hist['Ticket'] == i][0] for i in uni_id]
    ind_close = [df_deals_hist.index[df_deals_hist['Ticket'] == i][-1] for i in uni_id]

    # generate lists with values to add
    cts = df_hist_trades['OpenTime'].loc[ind_open]
    ops = df_deals_hist['Price'].loc[ind_open]
    cps = df_deals_hist['Price'].loc[ind_close]
    vol = df_deals_hist['Volume'].loc[ind_close]

    # resize dataframe to have only the first value of every unique position_id
    df_hist_trades = df_hist_trades.loc[ind_profloss]

    # add close time and close price as columns to the dataframe
    df_hist_trades['CloseTime'] = cts.to_list()
    df_hist_trades['OpenPrice'] = ops.to_list()
    df_hist_trades['ClosePrice'] = cps.to_list()
    df_hist_trades['Volume'] = vol.to_list()
    df_hist_trades['Profit'] = df_deals['profit'].loc[df_deals['position_id'].isin(uni_id) &
                                                      (df_deals['entry'] == 1)].to_list()

    return df_hist_trades
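A hedged usage sketch for f_hist_trades; it assumes the MetaTrader5 package is installed, the desktop terminal is running, and that pandas is imported in the surrounding module. The date range is only an example.

import datetime
import MetaTrader5 as mt5

if not mt5.initialize():                      # connect to the running desktop terminal
    raise RuntimeError(f"initialize() failed: {mt5.last_error()}")

trades = f_hist_trades(mt5,
                       datetime.datetime(2021, 2, 1),
                       datetime.datetime(2021, 3, 1))
print(trades[['Symbol', 'OpenTime', 'OpenPrice', 'ClosePrice', 'Profit']].head())
mt5.shutdown()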
Python
def f_hist_prices(param_ct, param_sym, param_tf, param_ini, param_end):
    """
    Historical prices retrieval from the MetaTrader 5 Desktop App.

    Parameters
    ----------
    param_ct: MetaTrader5 initialized client object
        An already successfully initialized connection object to the
        MetaTrader5 Desktop App
    param_sym: list of str
        The symbols for which the historical prices will be retrieved
        param_sym = ['EURUSD']
    param_tf: str
        The price granularity for the historical prices. Check available
        timeframes and nomenclatures in the references. The substring
        'TIMEFRAME_' is automatically added.
        param_tf = 'M1'
    param_ini: datetime
        Initial date to draw the historical prices
        param_ini = datetime(2021, 2, 1)
    param_end: datetime
        Final date to draw the historical prices
        param_end = datetime(2021, 3, 1)

    **** WARNINGS ****
    1.- Available History
        The MetaTrader 5 terminal provides bars only within the history available
        to a user on charts. The number of bars available to users is set in the
        "Max. bars in chart" parameter, so this must be configured manually within
        the desktop app to which the connection is made.
    2.- TimeZone
        When creating the 'datetime' object, Python uses the local time zone,
        while MetaTrader 5 stores tick and bar open times in the UTC time zone
        (without the shift). Data received from the MetaTrader 5 terminal has
        UTC time. Validate whether it is necessary to shift times to the local
        timezone.
    **** ******** ****

    References
    ----------
    https://www.mql5.com/en/docs/integration/python_metatrader5/mt5copyratesfrom_py#timeframe
    """

    # get hour info in UTC timezone (also GMT+0)
    hour_utc = datetime.datetime.now().utcnow().hour
    # get hour info in local timezone (your computer)
    hour_here = datetime.datetime.now().hour
    # difference (in hours) from UTC timezone
    diff_here_utc = hour_utc - hour_here
    # store the difference in hours
    tdelta = datetime.timedelta(hours=diff_here_utc)

    # granularity
    param_tf = getattr(param_ct, 'TIMEFRAME_' + param_tf)

    # dictionary for more than 1 symbol to retrieve prices
    d_prices = {}

    # retrieve prices for every symbol in the list
    for symbol in param_sym:
        # price retrieval from the MetaTrader 5 Desktop App
        prices = pd.DataFrame(param_ct.copy_rates_range(symbol, param_tf,
                                                        param_ini - tdelta,
                                                        param_end - tdelta))
        # convert to datetime
        prices['time'] = [datetime.datetime.fromtimestamp(times) for times in prices['time']]
        # store in dict with symbol as a key
        d_prices[symbol] = prices

    # return historical prices
    return d_prices
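A companion sketch for f_hist_prices, again assuming an initialized MetaTrader5 client as in the previous example; note that param_sym is iterated over, so a list of symbols is passed.

prices = f_hist_prices(mt5, ['EURUSD', 'GBPUSD'], 'M1',
                       datetime.datetime(2021, 2, 1),
                       datetime.datetime(2021, 3, 1))
print(prices['EURUSD'][['time', 'open', 'high', 'low', 'close']].head())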
Python
def normaliseData(self, data):
    '''
    Each dictionary in data may have different keys. In order to plot them it
    is necessary to normalise them, i.e. make each dictionary have exactly the
    same keys. The default value for all keys that were not present in a
    dictionary is zero.

    Args:
        data (dict): For each layer it holds a dictionary with domain names
            and a value (either the number of packets or the amount of
            traffic). Usually self.dataDict is passed as an argument.
    '''
    keys = self.getKeysFromDict(data)

    for layer, domains in data.items():
        for key in keys:
            if key not in domains:
                data[layer][key] = 0
Python
def plotFig(self):
    '''
    Plots the figure.

    Because the plot is stacked it is necessary to define where the next set
    of bars has to start. For this purpose the "left" variable stores the
    position at which the next bar should start. First all data need to be
    normalised; after this operation each dictionary contains the same set
    of keys.

    Variables:
        left (list): Has the same length as the number of entries in each
            dictionary. After the current layer is processed it sums the
            values from the current layer with the already processed values.
        keys (list): Sorted list of unique keys from each dictionary. They
            have to be sorted so they are in the same order for each layer.
        vals (list): Values associated with the keys.
    '''
    plots = []
    labels = []
    self.normaliseData(self.dataDict)
    left = [0] * len(self.dataDict[list(dict.keys(self.dataDict))[0]])

    for layer, data in self.dataDict.items():
        keys, vals = list(zip(*sorted(data.items())))
        # print(keys, vals, left)
        plots.append(self.plot.barh(keys, vals, left=left))
        left = [l1 + l2 for l1, l2 in zip(left, list(vals))]
        labels.append(layer)

    self.plot.legend(plots, labels)
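A minimal standalone sketch of the same stacked-barh idea using plain matplotlib; the per-layer dictionary below is made-up illustration data and already has identical keys (the role normaliseData plays above).

import matplotlib.pyplot as plt

# hypothetical per-layer data: {layer: {domain: packet_count}}
data_dict = {
    "dns": {"example.com": 4, "cdn.net": 0},
    "tcp": {"example.com": 10, "cdn.net": 7},
}

fig, ax = plt.subplots()
left = [0] * len(next(iter(data_dict.values())))
for layer, data in data_dict.items():
    keys, vals = zip(*sorted(data.items()))
    ax.barh(keys, vals, left=left, label=layer)      # stack on top of previous layers
    left = [l1 + l2 for l1, l2 in zip(left, vals)]
ax.legend()
plt.show()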
Python
def processLayer(self, packet, layer):
    """ Currently it is not needed to store all packets """
    # self.packets.append(layer)
    time = float(packet.frame_info.time_epoch) - self.node.baseTS
    self.packetTS.append(time)
    try:
        self.packetDiff.append(time - self.packetTS[-2])
    except IndexError:
        self.packetDiff.append(0)

    length = self.getDataLength(layer)
    if length >= 0:
        self.packetSize.append(length)
    else:
        self.packetSize.append(packet.length)

    self.increaseCount(self.addrPacketNum, packet.addr.getAddr())
    self.increaseCount(self.addrPacketSize, packet.addr.getAddr(), packet.length)

    if self.layerHasPort(layer):
        self.increaseCount(self.srcPort, layer.srcport)
        self.increaseCount(self.destPort, layer.dstport)

    if 'flags' in layer.field_names:
        self.flags.append(layer.flags)
    if 'options' in layer.field_names:
        self.options.append(layer.options)
Python
def split_layers(infile):
    """ infile is the ek output from a .pcap file """
    if not os.path.exists(infile):
        return []

    for_pd = []
    num_orginal_pkt = 0
    num_udp_tcp_only = 0
    num_omit_filter = 0

    with open(infile) as sf:
        for line in sf.readlines():
            line = line.strip()
            if line.startswith('{"timestamp"'):
                # one JSON object per frame
                num_orginal_pkt += 1
                res = process_pkt(line, infile)
                if res is None:
                    continue
                # aggregate result rows
                for_pd.append(res)

    return for_pd
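For context, a short sketch of how the ek-format input is typically produced and consumed; the file names are placeholders and assume tshark is installed.

# Produce the ek-format file first (newline-delimited JSON), e.g. on the shell:
#   tshark -r capture.pcap -T ek > capture.ek.json
rows = split_layers('capture.ek.json')    # one list entry per frame kept by process_pkt
print(len(rows))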
Python
def compute_pkt(ek_obj, tp_layer, list_detected_layers):
    """ Determine the data protocol and the data type of a single packet. """
    layers_obj = ek_obj[K_LAYER]
    timestamp = ek_obj['timestamp']

    if 'ip' not in list_detected_layers:
        return

    # Determine the protocol of the application layer by looking at the
    # 5th protocol in the frame, e.g. eth:ethertype:ip:tcp:http
    data_proto = 'data'
    reason = 'info:'
    frame_number = layers_obj['frame']['frame_frame_number']
    frame_protocols = layers_obj['frame']['frame_frame_protocols'].split(':')
    if len(frame_protocols) > 4:
        data_proto = frame_protocols[4]
    if data_proto == 'data':
        for dl in list_possible_data_layers:
            if dl in list_detected_layers:
                data_proto = dl
                break

    ip_src = layers_obj['ip']['ip_ip_src']
    ip_dst = layers_obj['ip']['ip_ip_dst']
    tp_srcport = layers_obj[tp_layer]['%s_%s_srcport' % (tp_layer, tp_layer)]
    tp_dstport = layers_obj[tp_layer]['%s_%s_dstport' % (tp_layer, tp_layer)]

    # Entropy of the UDP/TCP payload raw, after hex decoding
    if tp_layer == 'tcp':
        if 'tcp_tcp_payload_raw' in layers_obj[tp_layer]:
            data_stream = layers_obj[tp_layer]['tcp_tcp_payload_raw']
        else:
            if 'tcp_tcp_len' in layers_obj[tp_layer]:
                if layers_obj[tp_layer]['tcp_tcp_len'] == 0:
                    return
            return
    elif tp_layer == 'udp':
        data_stream = ek_obj[K_LAYER]['frame_raw'][84:]

    # 1 char of hex code = 4 bits of data, thus the byte count is len/2
    # data_bytes: bytes of udp/tcp payload
    data_bytes = len(data_stream) / 2
    if data_bytes < TH_DATA_LEN_EMPTY:
        etp = -1
        data_type = DT_OMIT
        reason += 'small payload (%dB)' % data_bytes
        result = [ip_src, ip_dst, tp_srcport, tp_dstport, tp_layer,
                  data_proto, data_type, data_bytes, etp, reason]
        return result

    etp = entropy_after_decode(data_stream)

    # Determine data type: unknown, text, media, compressed, encrypted
    data_type = 'unknown'

    # Step 1. Known protocols, currently HTTP, SSL, DNS, RTP
    if data_proto == 'http':
        # use Content-Encoding in the HTTP header
        if 'http_http_content_encoding' in layers_obj[LAYER_HTTP]:
            http_ce = layers_obj[LAYER_HTTP]['http_http_content_encoding']
            if http_ce in list_compressed:
                reason += 'http+content encoding=%s' % http_ce
                data_type = DT_COMPRESSED
        if data_type == 'unknown':
            # use content type in the HTTP header
            if 'http_http_content_type' in layers_obj[LAYER_HTTP]:
                http_ct = layers_obj[LAYER_HTTP]['http_http_content_type']
                if http_ct.startswith('text'):
                    data_type = DT_TEXT
                    reason += 'http_content_type (%s)' % http_ct
                elif http_ct.startswith('image') or http_ct.startswith('video'):
                    data_type = DT_MEDIA_MAGIC
                    reason += 'http_content_type (%s)' % http_ct
        if data_type == 'unknown':
            # wireshark identified certain media types
            for mt in list_media_proto:
                if mt in list_detected_layers:
                    data_type = DT_MEDIA_MAGIC
                    reason += 'http+media(%s)' % mt
                    break
        if data_type == 'unknown':
            # wireshark identified certain text types: text, json, xml
            for tt in list_text_proto:
                if tt in list_detected_layers:
                    data_type = DT_TEXT
                    reason += 'http+text'
                    break
    elif data_proto == 'ssl':
        if etp > TH_ENCRYPTED:
            reason += 'ssl+etp>%s' % TH_ENCRYPTED
            data_type = DT_ENCRYPTED
        elif 'ssl_handshake_text' in layers_obj[LAYER_SSL]:
            data_type = DT_TEXT
            reason += 'ssl+handshake'
    elif data_proto == 'dns':
        reason += 'dns'
        data_type = DT_TEXT
        if 'text_dns_dnskey_protocol' in layers_obj[LAYER_DNS]:
            data_type = DT_ENCRYPTED
            reason += ':dnskey'
    elif data_proto == 'rtp':
        # eth:ethertype:ip:udp:rtp:srp:ccsrl:h245
        reason += 'rtp:'
        reason += layers_obj['frame']['frame_frame_protocols'][25:]
        data_type = DT_MEDIA_RTP
    elif data_proto == 'gquic':
        reason += 'gquic'
        data_type = DT_ENCRYPTED

    # Step 2. Check magic number or other file signature
    if data_type == DT_UNKNOWN:
        magic_type = check_magic_number(data_stream)
        if magic_type is not None:
            reason += 'magic (%s)' % magic_type
            if magic_type in list_compressed:
                # a compressed type
                data_type = DT_COMPRESSED
                print('%s COMPRESSED: %s - %s %s' % (timestamp, data_proto, tp_layer, magic_type))
            elif magic_type in list_media_proto:
                # a media type
                print('%s MEDIA: %s - %s %s' % (timestamp, data_proto, tp_layer, magic_type))
                data_type = DT_MEDIA_MAGIC

    # Step 3. Guess from <data_bytes, entropy>
    if data_type == DT_UNKNOWN:
        if etp > TH_HIGH:
            # very high entropy
            data_type = DT_ENCRYPTED
            reason += 'high entropy'
        elif etp < TH_LOW:
            # low entropy
            if data_bytes > TH_DATA_LEN_MEANINGFUL:
                data_type = DT_TEXT
                reason += 'low entropy'

    # Step 4. Omit small unknown packets
    if data_type == DT_UNKNOWN and data_bytes < TH_DATA_LEN_OMIT:
        reason += ' small omit'
        data_type = DT_OMIT

    # Result: ip_src, ip_dst, srcport, dstport, tp_proto, data_proto,
    #         data_type, data_len, entropy, reason
    result = [ip_src, ip_dst, tp_srcport, tp_dstport, tp_layer,
              data_proto, data_type, data_bytes, etp, reason]
    return result
Python
def pickle_file_to_list(filename):
    """
    Opens a file, reads the contents and returns the data as a list,
    assuming the pickle file contained a list.

    :param filename: str
        Fully qualified filename
    :return: list
        Contents of the pickle file
    """
    with open(filename, 'rb') as picklefile:
        return pck.load(picklefile)
Python
def list_to_pickle_file(filename, list_data):
    """
    Saves data in a (nested) list to a pickle file.

    :param filename: str
        Fully qualified filename. Contents of an existing file will be
        overwritten without warning
    :param list_data: list
        The data, in the form of a (nested) list, to be saved in the pickle file
    """
    with open(filename, 'wb') as picklefile:
        pck.dump(list_data, picklefile)
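A minimal round-trip sketch of the two pickle helpers above; the pck alias for the pickle module and the output path are assumptions.

import pickle as pck   # the helpers above assume this alias

data = [[1, 2, 3], ['a', 'b']]
list_to_pickle_file('data.pkl', data)            # hypothetical output path
assert pickle_file_to_list('data.pkl') == data   # round trip returns the same list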
Python
def list_to_csv_file(filename, list_data, delimiter=','):
    """
    Saves data in a two-dimensional list (list of lists) to a csv file.

    :param filename: str
        Fully qualified filename. Contents of an existing file will be
        overwritten without warning
    :param list_data: list
        The data, in the form of a list of lists, to be saved to the csv file
    :param delimiter: str, optional
        The string to be used as delimiter. Default value is ','.
    """
    with open(filename, 'w') as file:
        for row in list_data:
            for item in row[:-1]:
                file.write('{}'.format(item) + delimiter)
            file.write('{}\n'.format(row[-1]))
Python
def invdiag(data):
    """
    Takes a vector of values, calculates the reciprocal of each value and
    returns a diagonalised matrix of these reciprocals. Zero values remain zero.

    :param data: numpy array
        The vector of values to be converted into an inverse diagonalised
        matrix. Should have one dimension only.
    :return: numpy array
        The matrix containing the inverse diagonalised values.
    """
    result = np.zeros(data.shape)
    for index in np.ndindex(data.shape):
        if data[index] != 0:
            result[index] = 1 / data[index]
    return np.diag(result)
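A small worked example of invdiag; the numpy import and the input values are illustrative.

import numpy as np

v = np.array([2.0, 0.0, 4.0])
print(invdiag(v))
# [[0.5  0.   0.  ]
#  [0.   0.   0.  ]      <- a zero stays zero instead of producing inf
#  [0.   0.   0.25]]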
Python
def list_to_numpy_array(list_data, row_header_cnt, col_header_cnt):
    """
    Takes a list of lists that contains a table with row and column headers
    and values, extracts the numerical values and returns them as a
    two-dimensional numpy array.

    :param list_data: list
        The table data, optionally including row and column headers, in the
        form of a list of lists
    :param row_header_cnt: int
        The number of leading columns occupied by the row header labels
    :param col_header_cnt: int
        The number of top rows occupied by the column header labels
    :return: numpy array
        Two-dimensional numpy array containing double floating point values
    """
    matrix = []
    row_idx = 0
    for list_row in list_data:
        if row_idx >= col_header_cnt:  # skip rows with column headers
            matrix.append(list_row[row_header_cnt:len(list_row)])
        row_idx += 1
    return np.asarray(matrix, dtype=np.double)
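A quick illustration of list_to_numpy_array with one header row and one header column; the table values are made up.

table = [
    ['',      'col_a', 'col_b'],
    ['row_1', '1.5',   '2.0'],
    ['row_2', '3.0',   '4.5'],
]
values = list_to_numpy_array(table, row_header_cnt=1, col_header_cnt=1)
print(values)        # [[1.5 2. ] [3.  4.5]]
print(values.dtype)  # float64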
Python
def csv_file_to_list(filename, delimiter=','):
    """
    Reads a csv file and returns the contents as a list of lists.

    :param filename: str
        Fully qualified filename.
    :param delimiter: str, optional
        The string to be used as delimiter. Default value is ','.
    :return: list
        The content of the csv file as a list of lists
    """
    with open(filename) as f:
        reader = csv.reader(f, delimiter=delimiter)
        d = list(reader)
    return d
Python
def make_secondary(data):
    """
    Aligns secondary flows in such a way that they then appear in the IOT.

    Primary products' positions:
        C_WOOD: 57,  C_PULP: 59,  C_PLAS: 85,  C_GLAS: 96,
        C_CMNT: 100, C_STEL: 103, C_PREM: 105, C_ALUM: 107,
        C_LZTP: 109, C_COPP: 111, C_ONFM: 113, C_CONS: 149

    Primary sectors' positions:
        A_WOOD: 49,  A_PULP: 51,  A_PLAS: 58,  A_GLAS: 64,
        A_CMNT: 68,  A_STEL: 71,  A_PREM: 73,  A_ALUM: 75,
        A_LZTP: 77,  A_COPP: 79,  A_ONFM: 81,  A_CONS: 112
    """
    V = data.supply
    U = data.use
    Y = data.final_use

    products = np.array([57, 59, 85, 96, 100, 103, 105, 107, 109, 111, 113, 149])
    industries = np.array([49, 51, 58, 64, 68, 71, 73, 75, 77, 79, 81, 112])

    no_countries = int(len(Y) / 200)

    prod_or = make_coord_array(products, no_countries, 200)
    ind_or = make_coord_array(industries, no_countries, 163)

    moved = allocate_sec_mat(V, U, Y, prod_or, ind_or)

    V = moved["V"]
    U = moved["U"]
    Y = moved["Y"]

    data.supply = V
    data.use = U
    data.final_use = Y

    return data
Python
def allocate_sec_mat(V, U, Y, prod_or, ind_or):
    """
    Moves the primary material output from the secondary material industries
    to the secondary material output. This allows for the presence of
    secondary materials in the IOT once they are transformed from SUTs.

    prod_or = row position of the primary supplied material
    ind_or  = column position of the primary industry supplying primary material
    """
    V = V.copy()
    U = U.copy()
    Y = Y.copy()

    # position of the secondary material
    des_prod_ix_pos = prod_or + 1
    des_ind_col_pos = ind_or + 1

    # getting the value of secondary material from the supply table,
    # which is placed on the primary material row
    misplaced = V[np.ix_(prod_or, des_ind_col_pos)]
    # placing the misplaced value on the secondary material row
    V[np.ix_(des_prod_ix_pos, des_ind_col_pos)] = misplaced

    # collecting how much of the primary material is consumed by final demand
    # to be subtracted from the supply value

    # matrix of primary sectors x all products (588 x 7987)
    prim_sec_supply_trans = V[np.ix_(prod_or)]

    # vector of total supply per primary sector
    # prim_sec_tot_output = np.sum(prim_sec_supply_trans)
    prim_sec_tot_output = np.sum(prim_sec_supply_trans, axis=1)

    # matrix of secondary product supply by secondary industry (588 x 588)
    sec_supply_trans = V[np.ix_(des_prod_ix_pos, des_ind_col_pos)]

    # vector of total secondary industry output (588)
    sec_output = np.sum(sec_supply_trans, axis=1)

    # vector of ratios between secondary output per industry and total
    # industry supply (diagonalised, 588 x 588)
    ratio_prim_sec = np.zeros((len(sec_output)))
    for idx in range(0, len(sec_output)):
        if prim_sec_tot_output[idx] != 0:
            ratio_prim_sec[idx] = sec_output[idx] / prim_sec_tot_output[idx]
    ratio_prim_sec = np.diag(ratio_prim_sec)
    # ratio_prim_sec = np.diag(np.divide(sec_output, prim_sec_tot_output))
    # ratio_prim_sec[ratio_prim_sec == [np.nan, np.inf]] = 0

    prim_sec_use_trans = U[np.ix_(prod_or)]
    prim_sec_fin_dem_trans = Y[np.ix_(prod_or)]

    eye = np.identity(len(ratio_prim_sec))

    U[np.ix_(prod_or)] = (eye - ratio_prim_sec) @ prim_sec_use_trans
    U[np.ix_(des_prod_ix_pos)] = ratio_prim_sec @ prim_sec_use_trans

    Y[np.ix_(prod_or)] = (eye - ratio_prim_sec) @ prim_sec_fin_dem_trans
    Y[np.ix_(des_prod_ix_pos)] = ratio_prim_sec @ prim_sec_fin_dem_trans

    V[np.ix_(prod_or, des_ind_col_pos)] = 0

    output = {"V": V, "U": U, "Y": Y}

    print('splitting off secondary materials ready')

    return output
Python
def main(data_dir, model, make_secondary):
    """
    Added model so that this module can be used as an interface to call the
    specific model types.
    """
    # SETTINGS
    use_filename = "U.npy"
    supply_filename = "V.npy"
    finaldemands_filename = "Y.npy"
    factorinputs_filename = "W.npy"
    extensions_filename = "M.npy"

    # CREATE CANONICAL FILENAMES
    full_use_fn = os.path.join(data_dir, use_filename)
    full_supply_fn = os.path.join(data_dir, supply_filename)
    full_finaldemands_fn = os.path.join(data_dir, finaldemands_filename)
    full_factor_inputs_fn = os.path.join(data_dir, factorinputs_filename)
    full_extensions_fn = os.path.join(data_dir, extensions_filename)

    # LOAD FILES AND CREATE SUT DATA TRANSFER OBJECT
    sut = st.Sut()
    sut.use = np.load(full_use_fn)
    sut.supply = np.load(full_supply_fn)
    sut.final_use = np.load(full_finaldemands_fn)
    sut.factor_inputs = np.load(full_factor_inputs_fn)
    sut.extensions = np.load(full_extensions_fn)  # should add one for final demand emissions

    # CREATE PXP-ITA IOT
    md_b = mb.TransformationModelB(sut, make_secondary)
    # model_b = md_b.io_coefficient_matrix()

    # CHECK IO TABLE
    if not md_b.check_io_transaction_matrix():
        print('Model B transaction matrix not correct')
    if not md_b.check_io_coefficients_matrix():
        print('Model B coefficients matrix not correct')
    if not md_b.check_ext_transaction_matrix():
        print('Model B extension matrix not correct')
    if not md_b.check_ext_coefficient_matrix():
        print('Model B extension coefficients matrix not correct')

    return md_b
Python
def upload_files(bucketName):
    """Upload files to GCP bucket."""
    files = [f for f in listdir(localFolder) if isfile(join(localFolder, f))]
    for file in files:
        localFile = localFolder + file
        blob = bucket.blob(file)
        blob.upload_from_filename(localFile)
    return f'Uploaded {files} to "{bucketName}" bucket.'
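A possible module-level setup these GCP helpers rely on; the folder, bucket name and the trailing slash convention are assumptions (credentials are taken from the environment).

from os import listdir
from os.path import isfile, join
from google.cloud import storage

localFolder = '/tmp/uploads/'                     # assumed trailing slash, see localFolder + file
bucketName = 'my-example-bucket'                  # hypothetical bucket name
bucket = storage.Client().get_bucket(bucketName)  # shared bucket handle used by the helpers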
Python
def list_files(bucketName):
    """List all files in GCP bucket."""
    files = bucket.list_blobs()
    fileList = [file.name for file in files if '.' in file.name]
    return fileList
Python
def download_random_file(bucketName, localFolder):
    """Download all listed files from the GCP bucket."""
    fileList = list_files(bucketName)
    for files in range(len(fileList)):
        blob = bucket.blob(fileList[files])
        fileName = blob.name.split('/')[-1]
        blob.download_to_filename(localFolder + fileName)
    return f'{fileList} downloaded from bucket.'
Python
def delete_file(bucketName):
    """Delete all listed files from the GCP bucket."""
    fileList = list_files(bucketName)
    for files in range(len(fileList)):
        bucket.delete_blob(fileList[files])
    return f'{fileList} deleted from bucket.'
Python
async def change_config(self, ctx, option='none'):
    """Allows the bot owner to change the bot's config settings"""
    split = ctx.message.content.split(" ")
    string = ""
    for i in range(len(split) - 2):
        string = string + f" {split[i + 2]}"
    new_value = string[1:]

    if option != 'none':
        if new_value != 'none':
            if option in check_options():
                if new_value in check_options(option) or "ANY" in check_options(option):
                    set_current(option, new_value)
                    await ctx.send(f"successfully set `{option}` to `{new_value}`")
                else:
                    await ctx.send(f"available options: `{array_to_comma_list(check_options(option))}`")
            else:
                await ctx.send(f"I don't see that option, available options: `{array_to_comma_list(check_options(option))}`")
        else:
            await ctx.send(f"available options: `{array_to_comma_list(check_options(option))}`")
    else:
        await ctx.send(f"available options: `{array_to_comma_list(check_options())}`")
Python
async def servers(self, ctx):
    """The amount of servers the bot is in"""
    members = 0
    for guild in self.bot.guilds:
        members += guild.member_count
    await ctx.send(f"I am currently in {len(self.bot.guilds)} servers with a total of {members} members")
Python
async def allow(self, ctx, member: discord.Member):
    """Allow someone to join the voice channel you are in one time only"""
    if ctx.author.voice:
        channel = ctx.author.voice.channel
        await channel.set_permissions(member, connect=True, speak=True)
        await ctx.send(f"allowing `{member}` to join `{channel.name}` one time only for the next 500 seconds")
        # todo:Themi This try statement is very gross; when msg is a success it falls under an
        # except saying: 'bool' object is not callable. Although it doesn't cause any problems,
        # this looks and feels gross
        try:
            msg = await self.bot.wait_for('voice_state_update', check=in_channel(member, channel), timeout=500)
            await channel.set_permissions(member, overwrite=None)
        except asyncio.TimeoutError:
            await channel.set_permissions(member, overwrite=None)
        except Exception:
            await channel.set_permissions(member, overwrite=None)
        else:
            pass
    else:
        await ctx.send(f"@{ctx.author} You are not in a voice channel")
Python
async def dadjoke(self, ctx):
    """Get a dad joke from the dada-base (aka icanhazdadjoke.com)"""
    joke = requests.get(url='https://icanhazdadjoke.com/',
                        headers={'User-Agent': 'zikuuu discord bot (https://github.com/Themis3000/zikuuu)',
                                 'Accept': 'text/plain'}).text.replace("’", "'")
    await ctx.send(joke)
Python
def _get_number(self, row):
    """ read a row as a number """
    return int(row.strip())
Python
def wrapper(obj, request):
    """
    Check api_token for all types of requests

    :param obj: APIView instance
    :param request: Django request
    :return: Wrapped function call, or a JsonResponse with a fail message
    """
    request_types = ["GET", "POST", "HEAD", "DELETE", "PATCH", "PUT"]
    for request_type in request_types:
        if hasattr(request, request_type):
            if (
                getattr(request, request_type).get("api_token")
                == settings.API_TOKEN_HASHED
            ):
                return func(obj, request)
    return APIResponse(
        status=HTTPStatus.BAD_REQUEST,
        details="Invalid api_token parameter.",
    ).json
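The function above reads like the inner closure of a decorator, with func being the wrapped view and APIResponse/settings coming from the enclosing project. A self-contained sketch of the same pattern with stand-in names instead of the project's objects:

from functools import wraps

EXPECTED_TOKEN = "secret"                        # stand-in for settings.API_TOKEN_HASHED

def api_token_required(func):                    # hypothetical name for the enclosing decorator
    @wraps(func)
    def wrapper(obj, request):
        # request is assumed to expose dict-like GET/POST attributes, as in Django
        for request_type in ["GET", "POST"]:
            if hasattr(request, request_type):
                if getattr(request, request_type).get("api_token") == EXPECTED_TOKEN:
                    return func(obj, request)
        return {"status": 400, "details": "Invalid api_token parameter."}
    return wrapper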
Python
def logp(self, time, delta_1, delta_2, x):
    """
    time: array
        event or censoring times
    delta_1: array
        event indicator in the first dimension
    delta_2: array
        event indicator re-arranged into the vector needed for gamma_frac
    x: array
        covariate information (independent component)
    """
    # define local instances of the globally initiated variables
    theta = self.theta
    coeffs_all = self.coeffs_all
    lams = self.lams
    rhos = self.rhos
    rs = self.rs

    linear = tt.dot(coeffs_all, x.T).T
    gamma_frac = tt.dot(delta_2, tt.log(theta**(-1) + tt.arange(self.k)))
    # linear = tt.dot(β, X.T).T  # this is the correct formulation

    weib_base_haz = lams * rhos * time**(rhos - 1)   # Weibull baseline hazard
    weib_base_cumhaz = lams * time**(rhos)           # Weibull baseline cumulative hazard

    phi_1 = tt.log(weib_base_haz * np.exp(linear))
    phi_2 = tt.log(1 + rs * weib_base_cumhaz * np.exp(linear))
    failed_component = tt.sum(delta_1 * phi_1, axis=1) - tt.sum(delta_1 * phi_2, axis=1)

    psi = tt.log(tt.sum(tt.log(1 + rs * weib_base_cumhaz * tt.exp(linear)) / rs, axis=1) + theta**(-1))

    # second component for all the censored observations
    one_k = tt.ones(self.k)
    second = (theta**(-1) + tt.dot(delta_1, one_k)) * psi

    # log likelihood
    return gamma_frac + failed_component + theta**(-1) * tt.log(theta**(-1)) - second
Python
def _get_priors(self, model=None, name=''):
    """
    Return prior distributions of the likelihood.

    Returns
    -------
    dict : mapping name -> pymc3 distribution
    """
    if name:
        name = '{}_'.format(name)

    model = modelcontext(model)
    priors = {}
    for key, val in self.priors.items():
        if isinstance(val, numbers.Number):
            priors[key] = val
        else:
            priors[key] = model.Var('{}{}'.format(name, key), val)

    return priors
Python
def create_likelihood(self, name, y_est, y_data, e_data, model=None):
    """
    Create likelihood distribution of observed data.

    Parameters
    ----------
    y_est : theano.tensor
        Estimate of dependent variable
    y_data : array
        Observed dependent variable
    e_data : array
        Observed censoring indicator
    """
    priors = self._get_priors(model=model, name=name)
    # Wrap y_est in link function
    priors[self.parent] = y_est
    if name:
        name = '{}_'.format(name)
    return self.likelihood('{}y'.format(name),
                           observed={'value': y_data, 'event': e_data},
                           **priors)
Python
def create_likelihood(self, name, indep_1, indep_2, time_1, time_2, e_1, e_2, model=None):
    """
    Create likelihood distribution of observed data.

    Parameters
    ----------
    """
    priors = self._get_priors(model=model, name=name)
    priors[self.parent_1] = indep_1
    priors[self.parent_2] = indep_2
    if name:
        name = '{}_'.format(name)
    return self.likelihood('{}y'.format(name),
                           observed={'time_1': time_1, 'time_2': time_2,
                                     'delta_1': e_1, 'delta_2': e_2},
                           **priors)
Python
def create_likelihood(self, name, coeffs_all, theta, rhos, lams, rs, time, event, event_change, x, total_size, k, model=None):
    """Create likelihood distribution of observed data.

    Parameters
    ----------
    coeffs_all, theta, rhos, lams, rs : theano.tensor
        Regression coefficients, frailty parameter, Weibull shapes and scales,
        and transformation parameters passed through to the likelihood
    time : array or Minibatch
        Observed times
    event : array or Minibatch
        Event indicators (delta_1)
    event_change : array or Minibatch
        Re-arranged event indicators (delta_2)
    x : array or Minibatch
        Covariate matrix
    total_size : int
        Total number of observations, required when minibatches are used
    k : int
        Number of outcomes
    """
    priors = self._get_priors(model=model, name=name)
    priors[self.parent_1] = coeffs_all
    priors[self.parent_2] = theta
    priors[self.parent_3] = rhos
    priors[self.parent_4] = lams
    priors[self.parent_5] = rs
    if name:
        name = '{}_'.format(name)
    # Pass the minibatch generator here if we want minibatch ADVI;
    # with a minibatch we also need to supply the total size.
    if str(time) == 'Minibatch':
        return self.likelihood('{}y'.format(name),
                               observed={"time": time, 'delta_1': event,
                                         'delta_2': event_change, 'x': x},
                               total_size=total_size, k=k, **priors)
    else:
        return self.likelihood('{}y'.format(name),
                               observed={"time": time, 'delta_1': event,
                                         'delta_2': event_change, 'x': x},
                               k=k, **priors)
Python
def sim_Weibull(N, lam, rho, beta, rateC, maxtime): ''' Function to simulate weibull survival times with exponential censoring according to the weibull PH model Parameters ---------- N : Number of samples to generate lam : scale parameter rho : shape parameter beta : effect size coefficients rateC : censoring rate of exponential censoring time maxtime : maximum study time ''' x = np.random.binomial(n=1,p=.5,size =N) U = np.random.uniform(size=N) Tlat = (-np.log(U)/(lam*np.exp(x*beta)))**(1/rho) #probability integral transform C = np.random.exponential(scale=1/rateC, size = N) C[C > maxtime] = maxtime # follow-up times and event indicators time = np.min(np.asarray([Tlat,C]),axis = 0) status = Tlat <= C out = pd.DataFrame(np.array([time, status, x]).T) out.columns = ["time", "event", "x"] return(out)
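A minimal usage sketch for sim_Weibull follows; numpy and pandas are assumed to be imported as np and pd (as in the function body), and the parameter values are illustrative, not taken from the original project.

# Illustrative call: 500 subjects, exponential censoring (assumed values)
np.random.seed(42)
dat = sim_Weibull(N=500, lam=0.01, rho=1.5, beta=0.7, rateC=0.05, maxtime=20)

print(dat.head())
print("observed event rate:", dat["event"].mean())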
Python
def sim_transform_Weibull(N, lam, rho, beta, rateC, maxtime, r): ''' Function to simulate transformed weibull survival times with exponential censoring according to the weibull PH model Parameters ---------- N : Number of samples to generate lam : scale parameter rho : shape parameter beta : effect size coefficients rateC : censoring rate of exponential censoring time maxtime : maximum study time r : transformation parameter ''' x = np.random.binomial(n=1,p=.5,size =N) U = np.random.uniform(size=N) Tlat = ((np.exp(-np.log(U)*r)-1)/(lam*r*np.exp(x*beta)))**(1/rho) #probability integral transform C = np.random.exponential(scale=1/rateC, size = N) C[C > maxtime] = maxtime # follow-up times and event indicators time = np.min(np.asarray([Tlat,C]),axis = 0) status = Tlat <= C out = pd.DataFrame(np.array([time, status, x]).T) out.columns = ["time", "event", "x"] return(out)
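Since the transformation reduces to the plain Weibull PH model as r approaches 0, a quick sanity check is to compare the two simulators with a very small r and the same seed; this sketch assumes illustrative parameter values.

# Sanity check (assumed parameter values): with r close to 0 the transformed
# simulator should behave like the plain Weibull PH simulator.
np.random.seed(0)
d_plain = sim_Weibull(N=2000, lam=0.01, rho=1.5, beta=0.7, rateC=0.05, maxtime=20)
np.random.seed(0)
d_trans = sim_transform_Weibull(N=2000, lam=0.01, rho=1.5, beta=0.7, rateC=0.05, maxtime=20, r=1e-6)

# The two simulated time distributions should be nearly identical.
print(d_plain["time"].describe())
print(d_trans["time"].describe())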
Python
def sim_weibull_frail_generalized(betas, theta, X, lam, r, rho, maxtime, cens_end, n, k, first=False):
    '''
    Function to simulate transformed weibull survival times with uniform censoring
    according to the weibull PH model

    Parameters
    ----------
    betas : effect sizes
    theta : parameter of gamma distribution of frailties
    X : covariate matrix
    lam : scale parameters for the different levels (must be dimension kx1)
    rho : shape parameters for each level (kx1)
    r : transformation parameter
    maxtime : maximum study time
    cens_end : width of the uniform censoring window
    n : number of samples to generate
    k : number of outcomes
    first : boolean, do we want to return just the time to first event
    '''
    w = np.random.gamma(size=n, shape=theta**(-1), scale=theta)

    # event times from the probability integral transform
    Te = ((np.exp(-(np.log(np.random.uniform(size=(n, k)))*r)/w[:, None]) - 1) /
          (r*lam*np.exp(np.dot(X, betas.T))))**(1/rho)

    if first:
        # time to first event: uniform censoring truncated at maxtime
        Cens = 1 + cens_end*np.random.uniform(size=n)
        Cens[Cens > maxtime] = maxtime
        alltimes = np.vstack((Cens, Te.T)).T
        eventType = []
        for i in range(len(w)):
            eventType.append(np.where(alltimes[i, ] == np.amin(alltimes[i, ]))[0][0])
        obs_t = list(np.amin(alltimes, axis=1))
        out = pd.DataFrame(np.array([obs_t, eventType,
                                     pd.Series(X[:, [0]][:, 0]),
                                     pd.Series(X[:, [1]][:, 0]), w])).T
        # Clean up for the covariates
        out.columns = ["obs_t", "eventType", "sex", "age", "sim_frail"]
    else:
        # per-outcome uniform censoring truncated at maxtime
        Cens = 1 + cens_end*np.random.uniform(size=(n, k))
        Cens[Cens > maxtime] = maxtime
        results = np.repeat(0, n)
        names_df = ["del"]
        # loop over levels
        for level in range(k):
            obs_t = np.amin(np.array([Te[:, level], Cens[:, level]]).T, axis=1)  # observed time
            names_df = np.append(names_df, "time_" + str(level + 1))
            delta = (Te[:, level] >= Cens[:, level]) + 0.  # censoring indicator
            names_df = np.append(names_df, "delta_" + str(level + 1))
            results = np.vstack((results, obs_t))
            results = pd.DataFrame(np.vstack((results, delta)))
        # Names of X
        x_names = ["X_" + str(j + 1) for j in np.arange(X.shape[1])]
        names_df = np.append(names_df, x_names)
        names_df = np.append(names_df, "frailty")
        # now add the covariates and the frailty
        out = pd.DataFrame(np.vstack((results, X.T, w.T)).T)
        out.columns = names_df
        out = out.iloc[:, out.columns != "del"]  # get rid of extra column
    return(out)
Python
def sim_simple_covs(n): ''' Function to simulate simple covariates ---------- n : Number of samples to generate ''' sex = np.random.binomial(n=1,p=.5,size =n) age = np.random.gamma(size=n, shape = 10, scale = 1/.3) return(np.array([sex,age]).T)
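A usage sketch tying sim_simple_covs to sim_weibull_frail_generalized. The argument shapes are inferred from how they are used in the simulator (betas with one row per outcome, lam and rho of length k); the numeric values are assumptions.

# Illustrative simulation of k = 2 correlated outcomes for n = 300 subjects.
n, k = 300, 2
X = sim_simple_covs(n)                           # columns: sex, age
betas = np.array([[0.5, 0.01], [0.3, 0.02]])     # one row of effects per outcome (assumed)
lam = np.array([0.01, 0.02])                     # per-outcome scales (assumed)
rho = np.array([1.2, 0.9])                       # per-outcome shapes (assumed)

sim = sim_weibull_frail_generalized(betas=betas, theta=0.5, X=X, lam=lam, r=1.0,
                                    rho=rho, maxtime=10, cens_end=9, n=n, k=k,
                                    first=False)
print(sim.head())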
Python
def q_learning(zacetno_stanje, st_poiskusov, q=dict(), diskontiraj=0.06, alpha=0.6, e=0.1):
    """ Q-Learning algorithm: Off-policy TD control.
    Finds the optimal greedy policy while improving, following an epsilon-greedy policy"""

    # List of all legal moves from the initial state
    sez_potez = zacetno_stanje.dovoljene_poteze()

    # q is a dict of dicts: state -> action -> value; missing actions default to 0
    q = dodaj_stanje_v_q(q, zacetno_stanje, sez_potez)

    policy = strategija(q, e)

    for iti_poiskus in range(st_poiskusov):
        # Store the starting state:
        stanje = zacetno_stanje

        # From the given state, get the actions and their probabilities
        sez_akcij, verjetnosti_akcij = policy(stanje)

        # Choose an action according to the given probabilities (weights):
        poteza = choices(sez_akcij, weights=verjetnosti_akcij, k=1)
        poteza = poteza[0]

        # Execute the move and collect the reward:
        prejsno_stanje = deepcopy(stanje)
        stanje.izvedi_potezo(poteza)
        nagrada = poisci_nagrado(stanje, prejsno_stanje, poteza)

        # If the new state is not yet in q, add it and give all of its actions the value 0
        sez_potez = stanje.dovoljene_poteze()
        q = dodaj_stanje_v_q(q, stanje, sez_potez)

        # TD Update
        nove_poteze = q[stanje.pretvori_v_niz()]
        nova_najboljsa_poteza = max(nove_poteze, key=lambda k: nove_poteze[k])
        td_target = nagrada + diskontiraj * q[stanje.pretvori_v_niz()][nova_najboljsa_poteza]
        td_delta = td_target - q[prejsno_stanje.pretvori_v_niz()][poteza]
        q[prejsno_stanje.pretvori_v_niz()][poteza] += alpha * td_delta

        # Check whether the episode is finished
        if stanje.ali_je_konec():
            print('episode:', iti_poiskus)
            print('Finished!')
            return q

        zacetno_stanje = stanje

    return q
Python
def q_learning(zacetno_stanje, st_poiskusov, q=dict(), diskontiraj=0.06, alpha=0.6, e=0.1):
    """ Q-Learning algorithm: Off-policy TD control.
    Finds the optimal greedy policy while improving, following an epsilon-greedy policy"""

    # List of all possible (simultaneous) moves from the initial state
    sez_potez = zacetno_stanje.socasne_poteze()

    # q is a dict of dicts: state -> action -> value; missing actions default to 0
    q = dodaj_stanje_v_q(q, zacetno_stanje, sez_potez)

    policy = strategija(q, e)

    for iti_poiskus in range(st_poiskusov):
        # Store the starting state:
        stanje = zacetno_stanje

        # From the given state, get the actions and their probabilities
        sez_akcij, verjetnosti_akcij = policy(stanje)

        # Choose an action according to the given probabilities (weights):
        poteza = choices(sez_akcij, weights=verjetnosti_akcij, k=1)
        poteza = poteza[0]

        # Execute the move (or each move in a tuple of simultaneous moves) and collect the reward:
        prejsno_stanje = deepcopy(stanje)
        nagrada = 0
        if type(poteza[0]) == tuple:
            for pot in poteza:
                pr_stanje = deepcopy(stanje)
                stanje.izvedi_potezo(pot)
                nag = poisci_nagrado(stanje, pr_stanje, pot)
                nagrada += nag
        else:
            stanje.izvedi_potezo(poteza)
            nagrada = poisci_nagrado(stanje, prejsno_stanje, poteza)

        # If the new state is not yet in q, add it and give all of its actions the value 0
        sez_potez = stanje.socasne_poteze()
        q = dodaj_stanje_v_q(q, stanje, sez_potez)

        # TD Update
        nove_poteze = q[stanje.pretvori_v_niz()]
        nova_najboljsa_poteza = max(nove_poteze, key=lambda k: nove_poteze[k])
        td_target = nagrada + diskontiraj * q[stanje.pretvori_v_niz()][nova_najboljsa_poteza]
        td_delta = td_target - q[prejsno_stanje.pretvori_v_niz()][poteza]
        q[prejsno_stanje.pretvori_v_niz()][poteza] += alpha * td_delta

        # Check whether the episode is finished
        if stanje.ali_je_konec():
            print('episode:', iti_poiskus)
            print('Finished!')
            return q

        zacetno_stanje = stanje

    return q
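Both loops above depend on the helpers dodaj_stanje_v_q and strategija, which are not shown here. The following is a hedged sketch reconstructed only from how they are called (strategija(q, e) must return a callable mapping a state to a pair of action list and probability list; dodaj_stanje_v_q must initialize missing state entries to 0); the real implementations may differ.

# Hypothetical helpers, reconstructed from their call sites above.
def dodaj_stanje_v_q(q, stanje, sez_potez):
    # If the state is not in q yet, give every legal move an initial value of 0.
    kljuc = stanje.pretvori_v_niz()
    if kljuc not in q:
        q[kljuc] = {poteza: 0 for poteza in sez_potez}
    return q

def strategija(q, e):
    # Epsilon-greedy policy: returns a function state -> (actions, probabilities).
    def policy(stanje):
        vrednosti = q[stanje.pretvori_v_niz()]
        akcije = list(vrednosti.keys())
        verjetnosti = [e / len(akcije)] * len(akcije)
        najboljsa = max(vrednosti, key=vrednosti.get)
        verjetnosti[akcije.index(najboljsa)] += 1 - e
        return akcije, verjetnosti
    return policy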
Python
def delete(self,customer_id): """ Deletes a customer's shopcart """ app.logger.info("Request to delete a shopcart for customer " + customer_id) shopcarts = Shopcart.find_by_customer_id(customer_id).all() message = [shopcart.serialize() for shopcart in shopcarts] for shopcart in shopcarts: shopcart.delete() return message, status.HTTP_204_NO_CONTENT
Python
def put(self, customer_id, product_id):
    """
    Update the quantity of an item in a Shopcart
    This endpoint will update a Shopcart based on the body that is posted
    """
    app.logger.info("Request to update Shopcart for customer_id: %s", customer_id)
    shopcart = Shopcart.find_by_shopcart_item(customer_id, product_id)
    if not shopcart:
        abort(status.HTTP_404_NOT_FOUND,
              "ShopCart item for customer_id '{}' was not found.".format(customer_id))
    logging.debug(shopcart)
    shopcart.deserialize(api.payload)
    shopcart.update()
    app.logger.info("Shopcart with customer_id [%s] updated.", shopcart.customer_id)
    return shopcart.serialize(), status.HTTP_200_OK
Python
def delete(self, customer_id, product_id):
    """
    Delete a product from a shopcart
    """
    app.logger.info("Request to delete a product from {}'s shopcart.".format(customer_id))
    product = Shopcart.find_by_shopcart_item(customer_id, product_id)
    if product:
        product.delete()
        app.logger.info("Product with id {} in {}'s shopcart deleted completely".format(product_id, customer_id))
    return "", status.HTTP_204_NO_CONTENT
Python
def post(self, customer_id):
    """
    Add a product into the shopcart
    """
    app.logger.info("Request to add a product into the shopcart")
    shopcart = Shopcart()
    shopcart.deserialize(api.payload)
    product = shopcart.find_by_shopcart_item(customer_id, shopcart.product_id)
    if product:
        abort(status.HTTP_400_BAD_REQUEST, 'Product already exists!')
    shopcart.create()
    message = shopcart.serialize()
    app.logger.info("Product with id [%s] added into customer [%s]'s shopcart.",
                    shopcart.product_id, shopcart.customer_id)
    return message, status.HTTP_201_CREATED
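A hedged client-side sketch of these endpoints using requests. The base URL and port are assumptions; the /api/shopcarts/<customer_id>/products/ route follows the pattern used elsewhere in this document.

# Client sketch: add a product, then update its quantity (assumed host and port).
import requests

BASE = "http://localhost:8080"

item = {"customer_id": 1, "product_id": 42, "product_name": "mug",
        "product_price": 9.99, "quantity": 1}

# add the product to customer 1's shopcart
resp = requests.post(f"{BASE}/api/shopcarts/1/products/", json=item)
print(resp.status_code)   # expect 201

# update the quantity of the same product
item["quantity"] = 3
resp = requests.put(f"{BASE}/api/shopcarts/1/products/42", json=item)
print(resp.status_code)   # expect 200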
Python
def step_impl(context): """ Delete all Shopcarts and load new ones """ headers = {'accept': 'application/json'} # list all of the shopcarts and delete them one by one context.resp = requests.get(context.base_url + '/api/shopcarts', headers=headers) expect(context.resp.status_code).to_equal(200) for shopcart in context.resp.json(): context.resp = requests.delete(context.base_url + '/api/shopcarts/' + str(shopcart["customer_id"]) + '/products/' + str(shopcart['product_id']), headers=headers) expect(context.resp.status_code).to_equal(204) # load the database with new shopcarts for row in context.table: data = { "customer_id": int(row['Customer ID']), "product_id": int(row['Product ID']), "product_name": row['Product Name'], "product_price": float(row['Product Price']), "quantity": int(row['Product Quantity']) } create_url = context.base_url + '/api/shopcarts/' + row['Customer ID'] + '/products/' payload = json.dumps(data) context.resp = requests.post(create_url, data=payload, headers={'Content-Type': 'application/json'}) expect(context.resp.status_code).to_equal(201)
Python
def save_properties(data):
    """
    Save props data into either memcache (heroku only) or pickle to local file
    :param data: Dictionary of props data
    """
    if running_on_heroku:
        mc.set('props', data)
    else:
        # pickle needs a binary file handle
        with open(prop_file, 'wb') as login_prop_file:
            pickle.dump(data, login_prop_file)
Python
def load_properties():
    """
    Load properties data from either memcache (heroku only) or local pickled file
    :rtype : dict
    :return: Dictionary of all the properties
    """
    if running_on_heroku:
        obj = mc.get('props')
        if not obj:
            return {}
        else:
            return obj
    else:
        if os.path.isfile(prop_file):
            # pickle needs a binary file handle
            with open(prop_file, 'rb') as login_prop_file:
                data = pickle.load(login_prop_file)
            return data
        else:
            sys.exit("No prop file found")
Python
def HealthPing(): """ Lightweight endpoint to check the liveness of the REST endpoint """ return "pong"
Python
def _set_flask_app_configs(app): """ Set the configs for the flask app based on environment variables :param app: :return: """ env_to_config_map = { "FLASK_JSONIFY_PRETTYPRINT_REGULAR": "JSONIFY_PRETTYPRINT_REGULAR", "FLASK_JSON_SORT_KEYS": "JSON_SORT_KEYS", } for env_var, config_name in env_to_config_map.items(): if os.environ.get(env_var): # Environment variables come as strings, convert them to boolean bool_env_value = os.environ.get(env_var).lower() == "true" app.config[config_name] = bool_env_value
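A quick hedged check of the environment-variable mapping. Note that any value other than "true" (case-insensitive) becomes False; the throwaway Flask app below is only for illustration.

# Sketch: exercise the mapping with a throwaway app (assumes Flask is installed).
import os
from flask import Flask

os.environ["FLASK_JSON_SORT_KEYS"] = "False"
os.environ["FLASK_JSONIFY_PRETTYPRINT_REGULAR"] = "true"

demo_app = Flask(__name__)
_set_flask_app_configs(demo_app)

print(demo_app.config["JSON_SORT_KEYS"])               # False
print(demo_app.config["JSONIFY_PRETTYPRINT_REGULAR"])  # True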
Python
def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns a Numpy array
    '''
    # Resize so that the shortest side is 256 pixels, keeping the aspect ratio
    width, height = image.size
    if width < height:
        new_size = (256, int(256 * height / width))
    else:
        new_size = (int(256 * width / height), 256)
    image = image.resize(new_size)

    # Center crop to 224 x 224
    new_width, new_height = image.size
    desired_size = 224, 224
    left = (new_width - desired_size[0]) / 2
    top = (new_height - desired_size[1]) / 2
    right = (new_width + desired_size[0]) / 2
    bottom = (new_height + desired_size[1]) / 2
    image1 = image.crop((left, top, right, bottom))

    # Normalize with the ImageNet means and standard deviations
    image2 = np.array(image1) / 255
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    np_image = (image2 - mean) / std

    # Reorder dimensions to channels-first for PyTorch
    image_final = np_image.transpose((2, 0, 1))

    return image_final
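A short usage sketch with PIL; 'flower.jpg' is a placeholder path, not a file from the original project.

# Usage sketch (placeholder file name).
from PIL import Image
import numpy as np

with Image.open("flower.jpg") as img:
    arr = process_image(img)

print(arr.shape)   # (3, 224, 224) for an RGB image: channels first, ready for torch.from_numpy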
Python
def predict(image_path, model, top_k, gpu_usage):
    ''' Predict the class (or classes) of an image using a trained deep learning model.
    '''
    # Pick a random class folder and a random image inside it
    ImageFolder = os.listdir(image_path)
    Random_num_dir = random.randint(0, len(ImageFolder) - 1)
    ImageList = os.listdir(image_path + "/" + ImageFolder[Random_num_dir])

    loadedImages = []
    for image in ImageList:
        if image[-3:] in ["png", "jpg"]:
            img = Image.open(image_path + "/" + ImageFolder[Random_num_dir] + "/" + image)
            loadedImages.append(img)

    Random_num = random.randint(0, len(loadedImages) - 1)
    image_pc = process_image(loadedImages[Random_num])
    image_show = torch.from_numpy(image_pc)
    image_torch = torch.from_numpy(image_pc).type(torch.FloatTensor)
    image_unsq = image_torch.unsqueeze_(0)

    if gpu_usage == "gpu":
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    else:
        device = torch.device("cpu")

    model.to(device)
    image_model = image_unsq.to(device)
    logps = model.forward(image_model)
    ps = torch.exp(logps)
    top_probes, top_classes = ps.topk(top_k, dim=1)

    return top_probes, top_classes, image_show
Python
def predict(image_path, model, topk=5):
    ''' Predict the class (or classes) of an image using a trained deep learning model.
    '''
    # Pick a random class folder and a random image inside it
    ImageFolder = os.listdir(image_path)
    Random_num_dir = random.randint(0, len(ImageFolder) - 1)
    ImageList = os.listdir(image_path + "/" + ImageFolder[Random_num_dir])

    loadedImages = []
    for image in ImageList:
        if image[-3:] in ["png", "jpg"]:
            img = Image.open(image_path + "/" + ImageFolder[Random_num_dir] + "/" + image)
            loadedImages.append(img)

    Random_num = random.randint(0, len(loadedImages) - 1)
    image_pc = process_image(loadedImages[Random_num])
    image_show = torch.from_numpy(image_pc)
    image_torch = torch.from_numpy(image_pc).type(torch.FloatTensor)
    image_unsq = image_torch.unsqueeze_(0)

    model.to(device)
    image_model = image_unsq.to(device)
    logps = model.forward(image_model)
    ps = torch.exp(logps)
    top_probes, top_classes = ps.topk(topk, dim=1)

    return top_probes, top_classes, image_show
Python
def traverse_grid(start_cell, direction, num_steps):
    """
    Function that iterates through the cells in a grid
    in a linear direction

    start_cell is a tuple (row, col) denoting the starting cell

    direction is a tuple that contains the difference between
    consecutive cells in the traversal
    """
    for step in range(num_steps):
        row = start_cell[0] + step * direction[0]
        col = start_cell[1] + step * direction[1]
        print("Processing cell", (row, col), "with value", EXAMPLE_GRID[row][col])
Python
def run_example():
    """
    Run several example calls of traverse_grid()
    """
    print("Print out values in grid")
    for row in range(GRID_HEIGHT):
        print(EXAMPLE_GRID[row])
    print()

    print("Traversing first row")
    traverse_grid((0, 0), (0, 1), GRID_WIDTH)
    print()

    print("Traversing second column")
    traverse_grid((0, 1), (1, 0), GRID_HEIGHT)
    print()

    print("Traversing second column in reverse order")
    traverse_grid((GRID_HEIGHT - 1, 1), (-1, 0), GRID_HEIGHT)
    print()

    print("Traversing diagonal")
    traverse_grid((0, 0), (1, 1), min(GRID_WIDTH, GRID_HEIGHT))
Python
def run_gui(): """ Create a frame and assign draw handler """ frame = simplegui.create_frame("Indexed grid", CANVAS_WIDTH, CANVAS_HEIGHT) frame.set_canvas_background("White") frame.set_draw_handler(draw) # Start the frame animation frame.start()
Python
def create_plots(begin, end, stride): """ Plot the function double, square, and exp from beginning to end using the provided stride The x-coordinates of the plotted points start at begin, terminate at end and are spaced by distance stride """ # generate x coordinates for plot points x_coords = [] current_x = begin while current_x < end: x_coords.append(current_x) current_x += stride # compute list of (x, y) coordinates for each function double_plot = [(x_val, double(x_val)) for x_val in x_coords] square_plot = [(x_val, square(x_val)) for x_val in x_coords] exp_plot = [(x_val, exp(x_val)) for x_val in x_coords] # plot the list of points simpleplot.plot_lines("Plots of three functions", 600, 400, "x", "f(x)", [double_plot, square_plot, exp_plot], True, ["double", "square", "exp"])
Python
def merge(line): """ Helper function that merges a single row or column in 2048 """ # replace with your code from the previous mini-project l = len(line) s1 = [0]*l j = 0 for i in range(l): if line[i] != 0: s1[j] = line[i] j += 1 for k in range(l-1): if s1[k] == s1[k+1]: s1[k] *=2 s1.pop(k+1) s1.append(0) return s1
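A few worked examples of the merge step, illustrating the expected 2048 semantics (tiles slide toward the front and each pair merges at most once per move).

# Worked examples of the merge behavior.
assert merge([2, 0, 2, 4]) == [4, 4, 0, 0]    # the two 2s slide together and merge
assert merge([2, 2, 2, 2]) == [4, 4, 0, 0]    # each pair merges exactly once
assert merge([2, 2, 4, 4]) == [4, 8, 0, 0]
assert merge([8, 0, 0, 8]) == [16, 0, 0, 0]
assert merge([0, 0, 0, 0]) == [0, 0, 0, 0]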
Python
def new_tile(self): """ Create a new tile in a randomly selected empty square. The tile should be 2 90% of the time and 4 10% of the time. """ # replace with your code pass
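The stub above leaves new_tile unimplemented. Below is one possible sketch; the attribute names self._grid, self._grid_height, and self._grid_width are assumptions about the surrounding class, not part of the original code.

# Hypothetical implementation sketch (assumed attribute names).
import random

def new_tile(self):
    """
    Create a new tile in a randomly selected empty square.
    The tile should be 2 90% of the time and 4 10% of the time.
    """
    empty_squares = [(row, col)
                     for row in range(self._grid_height)
                     for col in range(self._grid_width)
                     if self._grid[row][col] == 0]
    if not empty_squares:
        return
    row, col = random.choice(empty_squares)
    self._grid[row][col] = 2 if random.random() < 0.9 else 4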
Python
def stopwatch_format(ticks):
    """
    Convert tenths of seconds to formatted time
    """
    minutes = ticks // 600
    tens_seconds = (ticks // 100) % 6
    seconds = (ticks // 10) % 10
    tenths = ticks % 10
    return str(minutes) + ':' + str(tens_seconds) + \
        str(seconds) + '.' + str(tenths)
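A couple of worked conversions, remembering that ticks are tenths of a second.

# Worked examples.
assert stopwatch_format(0) == "0:00.0"
assert stopwatch_format(625) == "1:02.5"    # 62.5 seconds
assert stopwatch_format(1234) == "2:03.4"   # 123.4 seconds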
Python
def put(obj):
    """Loads object into Plasma. Will only work if plasma has been initialized with init_plasma()

    Parameters
    ----------
    obj :
        Object that will be shared through Plasma.

    Returns
    -------
    ObjectID
        An ObjectID generated by Plasma that can be passed to functions that are
        executed in a child process by apalis.

    Examples
    --------
    >>> apalis.put(1)
    ObjectID(bad02ba2c0f59e5f55033298520668cbf0ad1102)
    """
    return plasma_info.plasma_client.put(obj)
Python
def init_plasma(mem=1000):
    """Initializes a Plasma object store.

    Args:
        mem (int, optional): The argument specifies the size of the store in megabytes. Defaults to 1000.

    Returns:
        (PlasmaClient): Plasma client object
    """
    global plasma_info

    if not plasma_info.init:
        import pyarrow.plasma as plasma
        plasma_info.plasma = plasma

        # A random suffix makes it unlikely that two instances of plasma try to use the same socket file
        import os
        import string
        characters = string.ascii_uppercase + string.ascii_lowercase + string.digits
        rstr = "".join(np.random.choice(list(characters), 10))
        plasma_info.plasma_client_file_name = "/tmp/plasma_" + rstr

        # The plasma_store executable lives next to the Python interpreter
        PLASMA_STORE_EXECUTABLE = os.path.join(os.path.dirname(sys.executable), "plasma_store")

        # Run Plasma
        system_run(f"{PLASMA_STORE_EXECUTABLE} -m {int(mem * 1000000)} -s {plasma_info.plasma_client_file_name}")

        plasma_info.plasma_client = plasma.connect(plasma_info.plasma_client_file_name)
        plasma_info.init = True
        return plasma_info.plasma_client

    else:
        print("Plasma has already been initialized before.")
        return plasma_info.plasma_client
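A hedged end-to-end sketch: start the store, put an object, and read it back. Reading back uses PlasmaClient.get from pyarrow directly, which is not part of the helpers shown above; pyarrow with the plasma_store executable is assumed to be installed.

# Usage sketch (assumes pyarrow's plasma_store is available on this machine).
client = init_plasma(mem=200)        # 200 MB store

oid = put({"a": 1, "b": [1, 2, 3]})  # returns a plasma ObjectID
print(oid)

# Reading back goes through the underlying PlasmaClient.
print(client.get(oid))               # {'a': 1, 'b': [1, 2, 3]}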
Python
def calculate_rarity(row, attribute_types):
    """Helper function to calculate rarity for each row in a pandas dataframe. Uses rarity.tools scoring

    Args:
        row ([pandas row]): row of a pandas dataframe
        attribute_types ([list]): list of attributes for the nft collection

    Returns:
        [float]: Rarity score for the nft, the sum of 1 / trait probability over all traits
    """
    score = 0
    for attribute in attribute_types:
        score = score + 1 / row[f"{attribute}_probability"]
    return score
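A small worked example of the scoring rule: rarer traits (lower probability) contribute larger terms. A plain dict stands in for the pandas row here; the trait names are illustrative.

# Worked example of the rarity.tools-style score.
row = {"Background_probability": 0.50, "Hat_probability": 0.02}
score = calculate_rarity(row, ["Background", "Hat"])
print(score)   # 1/0.50 + 1/0.02 = 52.0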
Python
def create_nft_df(nft_info_list): """Converts the list of NFT attributes into a dataframe and includes rarity information Args: nft_info_list ([list]): List of attributes based on get_all_nft_info Returns: [pandas dataframe]: Dataframe of NFT information """ for j in nft_info_list: ipfs_id = j["image"].replace("ipfs://", "") j["ipfs_url"] = f"https://cf-ipfs.com/ipfs/{ipfs_id}" for a in j["attributes"]: j[a["trait_type"]] = a["value"] df = pd.DataFrame(nft_info_list) attribute_types = [x["trait_type"] for x in df["attributes"][0]] attribute_prob_df_dict = {} for attribute in attribute_types: attribute_df = ( df[attribute] .value_counts(dropna=False) .rename_axis(attribute) .reset_index(name=f"{attribute}_count") ) attribute_df[f"{attribute}_probability"] = ( attribute_df[f"{attribute}_count"] / attribute_df[f"{attribute}_count"].sum() ) attribute_prob_df_dict[attribute] = attribute_df for attribute in attribute_types: df = df.merge( attribute_prob_df_dict[attribute], how="left", on=attribute) df["rarity_score"] = df.apply( lambda x: calculate_rarity(x, attribute_types), axis=1 ) df = df.sort_values("rarity_score", ascending=False) df["rarity_rankings"] = range(1, len(df) + 1) return df
Python
def update_link_unique_ips(link, ip_address=None): ''' Update Unique IP addresses for Link. ''' link.add_unique_ip(ip_address)
Python
def update_link_regions(link, ip_address=None): ''' Create or Update Region objects for Link. ''' # Initialize GeoIP object. g = GeoIP2() country = None # Attempt to get country for ip address. try: data = g.country(ip_address) country = data.get('country_name') code = data.get('country_code') # Get or create country if country does not exist. if country: country, created = Country.objects.get_or_create( name=country, code=code ) except (TypeError, geoip2.errors.AddressNotFoundError): # Ignore the cases where `ip_address` is None, or # the ip address does not exist in the GeoIP2 database. pass # Get or create Region object, where country is either an object or None. region, created = Region.objects.get_or_create(link=link, country=country) # Update last visited, clicks, and save changes. region.last_visited = timezone.now() region.total_clicks = F('total_clicks') + 1 region.save()
Python
def update_link_referers(link, referer_source=None): ''' Create or Update Referer objects for Link. ''' # If referer exists, normalize. if referer_source: referer_source = Referer.normalize_source(referer_source) if referer_source == Site.objects.get_current().domain: referer_source = '' # Get or create Referer object. referer, created = Referer.objects.get_or_create( link=link, source=referer_source ) # Update last visited, clicks, and save changes. referer.last_visited = timezone.now() referer.total_clicks = F('total_clicks') + 1 referer.save()
Python
def link_owner(wrapped_function): ''' A decorator to allow the owner of the Link to proceed. ''' @wraps(wrapped_function) def wrapper(request, *args, **kwargs): key = kwargs.get('key') if not request.user.links.filter(key=key).exists(): raise PermissionDenied return wrapped_function(request, *args, **kwargs) return wrapper
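A hypothetical view showing how the decorator is meant to be applied; the view name, template path, and login_required pairing are placeholders, not part of the original project.

# Hypothetical usage of the decorator on a detail view.
from django.contrib.auth.decorators import login_required
from django.shortcuts import render

@login_required
@link_owner
def link_detail(request, key):
    link = request.user.links.get(key=key)
    return render(request, 'links/detail.html', {'link': link})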
Python
def add_unique_ip(self, ip_address=None): ''' Update Unique IP Address for the Link. ''' if ip_address: self.addresses.update_or_create(address=ip_address)
Python
def total_clicks(self): ''' Return sum of all region's total clicks. ''' sum_data = self.regions.aggregate(Sum('total_clicks')) total_clicks = sum_data['total_clicks__sum'] return total_clicks if total_clicks else 0
Python
def unique_clicks(self): ''' Return the total number of unique addresses that visited the Link. ''' return self.addresses.count()
Python
def _generate_key(): ''' Generate a random string based on the Hash length and Alphabet. ''' return ''.join( random.choice(settings.HASH_ALPHABET) for x in range(settings.HASH_LENGTH) )
Python
def make_key(cls): ''' Make random key for Link. Ensure uniqueness of key by querying Database. ''' key = cls._generate_key() while cls.objects.filter(key=key).exists(): key = cls._generate_key() return key
Python
def normalize_key(cls, text):
    '''
    Keys may only contain alphanumeric characters and dashes.
    '''
    key_text = re.match(r'^[A-Za-z0-9-]+$', text)

    if key_text:
        key_text = key_text.string
    else:
        return None

    # Strip all dashes from the left and right of the string.
    key_text = (
        key_text
        .lstrip('-')
        .rstrip('-')
    )

    # Substitute 2 or more dashes with 1 dash.
    key_text = re.sub(r'-{2,}', '-', key_text)

    return key_text if key_text else None
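Worked examples of the normalization, assuming the method is exposed as a classmethod on Link (as it is called elsewhere in this document).

# Worked examples.
assert Link.normalize_key('--my--custom-key-') == 'my-custom-key'
assert Link.normalize_key('abc123') == 'abc123'
assert Link.normalize_key('not valid!') is None   # disallowed characters
assert Link.normalize_key('---') is None          # nothing left after stripping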
Python
def signup(request): ''' Signup for a new User account. ''' form = SignupForm(request.POST or None) if request.method == 'POST': if form.is_valid(): user = form.save() login(request, user) return redirect('index') return render(request, 'users/signup.html', { 'form': form })
Python
def remove_stale_tags_2(self):
    '''
    Attempt to edit two links with LinkEditForm with authenticated user.
    Add a newly created tag to two links, link1 and link2.
    - Check tag to see link relations.
    - Check that tag exists in each link's tags.
    Remove the tag from one link, link2.
    - Check that tag is removed from link2's tags.
    - Check that tag still exists in link1's tags.
    - Check that tag still exists.
    '''
    # Get the User and Login the User.
    user = User.objects.get(email='[email protected]')
    self.client.login(email=user.email, password='user')

    # Get two links created by user.
    link1, link2 = Link.objects.filter(user=user)[:2]

    # Create a tag.
    tag1 = Tag.objects.create(name='test-tag-1')

    # link1's data for LinkEditForm.
    d1 = {
        'destination': link1.destination,
        'title': link1.title,
        'tags': tag1.name
    }

    # link2's data for LinkEditForm.
    d2 = {
        'destination': link2.destination,
        'title': link2.title,
        'tags': tag1.name
    }

    # Edit link1 and link2 to 'add' tag1.
    form1 = LinkEditForm(d1, instance=link1, user=user)
    form2 = LinkEditForm(d2, instance=link2, user=user)

    # Ensure that both forms are valid, save them.
    self.assertTrue(form1.is_valid())
    self.assertTrue(form2.is_valid())
    link1_edited = form1.save()
    link2_edited = form2.save()

    # Check tag1 is in link1 and link2's tags.
    self.assertIn(tag1, link1_edited.tags.all())
    self.assertIn(tag1, link2_edited.tags.all())

    # Check that tag1 has 2 link relations.
    self.assertTrue(tag1.links.exists())
    self.assertEqual(tag1.links.count(), 2)

    # Edit data to remove tag from link2's data.
    d2['tags'] = ''

    # Edit link2 to remove tag1.
    form2a = LinkEditForm(d2, instance=link2, user=user)

    # Ensure form is valid, save it.
    self.assertTrue(form2a.is_valid())
    link2_edited2 = form2a.save()

    # Check that tag1 is removed from link2's tags.
    self.assertFalse(tag1 in link2_edited2.tags.all())

    # Check that tag1 is still in link1's tags.
    self.assertTrue(tag1 in link1.tags.all())

    # Check that the tag itself still exists.
    self.assertTrue(Tag.objects.filter(name=tag1.name).exists())
Python
def normalize_source(url): ''' Return hostname (including subdomains) of url. ''' url = urlparse(url) return url.hostname or url.path
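Worked examples, assuming the method is called on Referer as in update_link_referers above: referers may arrive as full URLs or as bare hostnames.

# Worked examples.
assert Referer.normalize_source('https://www.example.com/some/page?q=1') == 'www.example.com'
assert Referer.normalize_source('example.com') == 'example.com'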
Python
def clean_password2(self): ''' Check that both passwords match. ''' password1 = self.cleaned_data.get('password1') password2 = self.cleaned_data.get('password2') if password1 and password2 and password1 != password2: raise forms.ValidationError( self.error_messages['password_mismatch'] ) return password2
Python
def clean(self): ''' Check that both passwords match. ''' cleaned_data = super(SignupForm, self).clean() password1 = cleaned_data.get('password1') password2 = cleaned_data.get('password2') if password1 and password2 and password1 != password2: raise forms.ValidationError("Passwords don't match.") return cleaned_data
def save(self, commit=True): ''' Save provided password in hashed format. ''' user = super(SignupForm, self).save(commit=False) user.set_password(self.cleaned_data['password1']) if commit: user.save() return user
def clean_destination(self): ''' Raise validation error if User enters a url that originates from this site. ''' destination = self.cleaned_data.get('destination') url = urlparse(destination) site_domain = Site.objects.get_current().domain if url.hostname == site_domain: raise forms.ValidationError('Sorry, this url is not allowed!') return destination
def clean_key(self): ''' Raise validation error if key is given, but User is not given. Raise validation error if User enters an existing key. ''' user_not_exists = not self.user or not self.user.is_authenticated key = self.cleaned_data.get('key') # If key is given and (User is None or # User not authenticated), raise exception. if key and user_not_exists: raise forms.ValidationError('Only logged in users can define key.') # If a key is given and an existing url has same key, raise exception. if key and Link.objects.filter(key=key).exists(): raise forms.ValidationError('Custom link is already taken!') # Normalize key. Raise validation error if a raw key # was given, but a cleaned_key was not obtained. # This indicates that the key is invalid. cleaned_key = Link.normalize_key(key) if key and not cleaned_key: raise forms.ValidationError( 'Custom key can only contain alphanumeric ' 'characters and dashes' ) return cleaned_key
def clean_title(self): ''' Raise validation error if title is given, but User is not given. ''' user_not_exists = not self.user or not self.user.is_authenticated title = self.cleaned_data.get('title') # If title is given and (User is None or # User not authenticated), raise exception. if title and user_not_exists: raise forms.ValidationError( 'Only logged in users can define a title.' ) return title
def clean_tags(self): ''' Resolve tags from an input string. Raise validation error if more than 8 tags. ''' user_not_exists = not self.user or not self.user.is_authenticated tags = self.cleaned_data.get('tags') if tags: # If tags is given and (User is None or # User not authenticated), raise exception. if user_not_exists: raise forms.ValidationError( 'Only logged in users can define tags.' ) # Split and normalize text from tags input. tags = [Tag.normalize_text(tag) for tag in tags.split(',')] # Filter 'None' from tags list. tags = list(filter(lambda x: x, tags)) # Raise exception if tag length exceeds limit. if len(tags) > settings.TAG_LIMIT: raise forms.ValidationError( 'Cannot have more than {} tags.'.format(settings.TAG_LIMIT) ) # Resolve Tag objects from tags list. tags = [Tag.objects.get_or_create(name=tag)[0] for tag in tags] return tags
def save(self, commit=True): ''' Overrides form save method. Generates key if key does not exist. Sets User if user is authenticated. ''' link = super(LinkFormMixin, self).save(commit=False) # Generate random key for Link if key does not exist. if not link.key: link.key = Link.make_key() # Set User if User is authenticated. if self.user and self.user.is_authenticated: link.user = self.user # Set default link title if not link.title: title = 'Link - {}'.format(link.key) link.title = title link.save() # Get tags to update link tags. tags = self.cleaned_data.get('tags') # If form had tag field if tags is not None: # Get tags before saving edit and # tags that were just entered. old_tags = set(link.tags.all()) new_tags = set(tags) # Clear existing tags. link.tags.clear() # Remove cleared tags that have no m2m to links. for tag in old_tags.difference(new_tags): if not tag.links.exists(): tag.delete() if tags: # Add tags to link. link.tags.add(*tags) return link
def process_package(package, in_release=False):
    """
    Processes the given package object that belongs to a platform and adds
    it to the packages list variable in the parent scope.
    In essence, this method recursively traverses the JSON structure defined
    in packages.json and performs the core filtering.

    :param package: The package object to process.
    :param in_release: A boolean that indicates whether the current traversal
                       belongs to a package that needs to be filtered for the
                       given release label.
    """
    if isinstance(package, list):
        for entry in package:
            process_package(entry, in_release)
    elif isinstance(package, dict):
        if release is None:
            return
        for entry in package.get(release, []):
            process_package(entry, in_release=True)
    elif isinstance(package, str):
        # Filter out the package that doesn't belong to this release,
        # if a release label has been specified.
        if release is not None and not in_release:
            return
        packages.append(package)
    else:
        raise Exception('Unknown package of type: {}'.format(type(package)))
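Because `process_package` leans on `release` and `packages` from its enclosing scope, a self-contained sketch is the easiest way to see the filtering in action. The wrapper name, sample data, and release labels below are illustrative assumptions, not the project's real packages.json:

def collect_packages(platform_packages, release=None):
    packages = []

    def process_package(package, in_release=False):
        # Same traversal as above, with `release` and `packages`
        # supplied by the enclosing function.
        if isinstance(package, list):
            for entry in package:
                process_package(entry, in_release)
        elif isinstance(package, dict):
            if release is None:
                return
            for entry in package.get(release, []):
                process_package(entry, in_release=True)
        elif isinstance(package, str):
            if release is not None and not in_release:
                return
            packages.append(package)
        else:
            raise Exception('Unknown package of type: {}'.format(type(package)))

    process_package(platform_packages)
    return packages

data = ["gcc", "make", {"el8": ["python38"], "el7": ["python36"]}]
print(collect_packages(data))                 # ['gcc', 'make'] (no release label given)
print(collect_packages(data, release="el8"))  # ['python38'] (only release-specific entries)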
def _username_to_uuid(self, username: str) -> str: """Get the UUID of the player.""" http_conn = http.client.HTTPSConnection("api.mojang.com") header = {'User-Agent': 'Minecraft Username -> UUID', 'Content-Type': 'application/json'} http_conn.request( "GET", "/users/profiles/minecraft/" + username, headers=header) response = http_conn.getresponse().read().decode("utf-8") if not response: raise KeyError("player probably doesn't exist") json_data = json.loads(response) return json_data['id']
def hasAlpha(self, texture_fn: str) -> bool: """Check if texture file has alpha channel Args: texture_fn: filename of texture. Returns: Texture has alpha channel or not. """ if texture_fn not in self.table_alpha: full_filename = os.path.join(self.scenes_path, texture_fn) image = Image.open(full_filename) self.table_alpha[texture_fn] = len(image.mode) == 4 return self.table_alpha[texture_fn]
def _getLookAt(self): """Get lookat vector by pos of player""" r = self.radius isx, dummy_1, isz = map(int, self.player.pos) vec = lookat.firstPerson(self.player) dx, dz = isx - r, isz - r return (vec[0] - dx, vec[1], vec[2] - dz, vec[3] - dx, vec[4], vec[5] - dz, vec[6], vec[7], vec[8])
def laglongToCoord(theta: float, phi: float):
    """Convert latitude and longitude (in degrees) to an xyz coordinate."""
    from math import cos, sin, pi
    theta, phi = theta/180*pi, phi/180*pi
    return sin(theta)*cos(phi), sin(phi), cos(theta)*cos(phi)
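Two spot checks of the mapping (assuming the function above is in scope; results are exact up to floating-point rounding):

x, y, z = laglongToCoord(90, 0)
print(round(x, 6), round(y, 6), round(z, 6))  # 1.0 0.0 0.0 -> theta=90, phi=0 lands on the +x axis
x, y, z = laglongToCoord(0, 90)
print(round(x, 6), round(y, 6), round(z, 6))  # 0.0 1.0 0.0 -> phi=90 points straight along +y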
def _getColor(biome_id: int, elevation: int, corner):
    """Calculate biome color by triangle lerp.

    Args:
        biome_id: Biome ID
        elevation: height of block
        corner: Color of 3 corners of triangle.

    Returns:
        (r, g, b)
    """
    b = BIOMES[biome_id]
    temp = clamp(b[1] - elevation*0.00166667)
    rain = clamp(b[2])*temp
    alpha = [temp-rain, 1-temp, rain]
    ret_color = (0., 0., 0.)
    for i in range(3):
        ret_color = plus(ret_color, mult(corner[i], alpha[i]))
    ret_color = mult(ret_color, 1./255)
    ret_color = tuple(map(clamp, ret_color))
    return ret_color
def pipeline_from_config_file(filename):
    """
    public api to access pipeline creation when config is a json file,
    see `pipeline_from_config` for an example of what that config would look like
    """
    # use a context manager so the file handle is closed after loading
    with open(filename, "r") as config_file:
        return pipeline_from_config(json.load(config_file))
def pipeline_from_config(config):
    """
    public api to access pipeline creation

    pipeline config defines a series of transforms performed to each column of
    the data to be processed, along with any post processing steps

    see for example:
    config = {
        pre_process: [
            {
                name: concat,
                field: [header, tags, subject],
                config: {out_field: metadata, glue: " "}
            }
        ],
        transforms: [
            {
                type: featurizer,  # basic
                field: [body_text, metadata],
                transforms: [
                    {name: tfidf, config: {}}
                ]
            },
            {
                type: compound,  # recursive
                transforms: [{
                    type: featurizer,
                    field: subject_text,
                    transforms: [{name: tfidf, config: {}}]
                }],
                post_process: [{ name: nmf, config: {} }]  # none is just concat
            }
        ],
        # there is a concat prior to post process
        post_processes: [
            # a list where each element is a step. nested lists are parallel tasks
            [{name: svd, config: {}}, {name: null, config: {}}],  # step 1, all done in parallel, concat'd together
            {name: norm, config: {norm: l2}}
        ]
    }

    returned pipeline can then be instantiated with `fit`, and used to
    `transform` data to create potentially useful features
    """
    if (
        "pre_process" not in config
        and "transforms" not in config
        and "post_process" not in config
    ):
        raise ValueError(
            "invalid configuration. must specify at least one of "
            "'pre_process', 'transforms', 'post_process'"
        )

    return _compound_pipeline_from_config(config)
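For orientation, a hypothetical call sequence. The config keys and the 'tfidf'/'norm' transform names are lifted from the docstring example above and assumed to be registered in this library; `train_df` and `test_df` stand in for pandas DataFrames with a 'body_text' column:

config = {
    "transforms": [
        {"type": "featurizer", "field": ["body_text"],
         "transforms": [{"name": "tfidf", "config": {}}]},
    ],
    "post_process": [{"name": "norm", "config": {"norm": "l2"}}],
}

pipe = pipeline_from_config(config)  # build the (unfitted) pipeline
pipe.fit(train_df)                   # learn vocabularies / statistics
features = pipe.transform(test_df)   # feature matrix for downstream models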
def build_array_vocabizer(col, binary=False, min_df=0.0, max_df=1.0, max_features=None): """ the specified column is an array of tokens to be used as a feature """ return ( "array_vocabizer_%s" % col, Pipeline( [ ("selector", ItemSelector(col)), ( "counter", TfidfVectorizer( input="content", binary=binary, use_idf=False, min_df=min_df, max_df=max_df, max_features=max_features, tokenizer=identity, preprocessor=identity, ), ), ] ), )
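The core trick in this builder is feeding already-tokenized lists to `TfidfVectorizer` by short-circuiting its preprocessor and tokenizer (the `ItemSelector` and `identity` helpers come from the surrounding module and are not shown here). A standalone sketch of just that trick, with `identity` inlined:

from sklearn.feature_extraction.text import TfidfVectorizer

def identity(x):
    return x

docs = [["spam", "eggs", "spam"], ["eggs", "ham"]]  # pre-tokenized documents
vec = TfidfVectorizer(input="content", use_idf=False,
                      tokenizer=identity, preprocessor=identity)
counts = vec.fit_transform(docs)  # tokens used as-is; rows are l2-normalized term frequencies (use_idf=False)
print(sorted(vec.vocabulary_))    # ['eggs', 'ham', 'spam']

Newer scikit-learn versions warn that the default token_pattern is ignored once a custom tokenizer is supplied; passing token_pattern=None silences that warning.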
def validate_file(file: AudioFile) -> Mapping: """Validate file for use with the other Audio methods Parameter --------- file: AudioFile Returns ------- validated_file : Mapping {"audio": str, "uri": str, ...} {"waveform": array or tensor, "sample_rate": int, "uri": str, ...} {"audio": file, "uri": "stream"} if `file` is an IOBase instance Raises ------ ValueError if file format is not valid or file does not exist. """ if isinstance(file, Mapping): pass elif isinstance(file, (str, Path)): file = {"audio": str(file), "uri": Path(file).stem} elif isinstance(file, IOBase): return {"audio": file, "uri": "stream"} else: raise ValueError(AudioFileDocString) if "waveform" in file: waveform: Union[np.ndarray, Tensor] = file["waveform"] if len(waveform.shape) != 2 or waveform.shape[0] > waveform.shape[1]: raise ValueError( "'waveform' must be provided as a (channel, time) torch Tensor." ) sample_rate: int = file.get("sample_rate", None) if sample_rate is None: raise ValueError( "'waveform' must be provided with their 'sample_rate'." ) file.setdefault("uri", "waveform") elif "audio" in file: if isinstance(file["audio"], IOBase): return file path = Path(file["audio"]) if not path.is_file(): raise ValueError(f"File {path} does not exist") file.setdefault("uri", path.stem) return file
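Illustrative inputs and what they normalize to (this assumes the function above and its imports are available, as in pyannote.audio's `pyannote.audio.core.io`; the paths are placeholders and must point at real files for the path branch not to raise):

import torch

# In-memory waveform: (channel, time) tensor plus sample rate; uri defaults to "waveform".
print(validate_file({"waveform": torch.zeros(1, 16000), "sample_rate": 16000})["uri"])  # waveform

# Path or str: wrapped into a mapping, uri defaults to the file stem.
print(validate_file("recordings/meeting.wav"))  # {'audio': 'recordings/meeting.wav', 'uri': 'meeting'}

# Open file object: passed through with uri "stream".
with open("recordings/meeting.wav", "rb") as fp:
    print(validate_file(fp)["uri"])  # stream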
def crop(
    self,
    file: AudioFile,
    chunk: Union[Segment, List[Segment]],
    fixed: Optional[float] = None,
) -> Union[SlidingWindowFeature, np.ndarray]:
    """Run inference on a chunk or a list of chunks

    Parameters
    ----------
    file : AudioFile
        Audio file.
    chunk : Segment or list of Segment
        Apply model on this chunk. When a list of chunks is provided and
        window is set to "sliding", this is equivalent to calling crop on
        the smallest chunk that contains all chunks. In case window is set
        to "whole", this is equivalent to concatenating each chunk into one
        (artificial) chunk before processing it.
    fixed : float, optional
        Enforce chunk duration (in seconds). This is a hack to avoid rounding
        errors that may result in a different number of audio samples for two
        chunks of the same duration.
        # TODO: document "fixed" better in pyannote.audio.core.io.Audio

    Returns
    -------
    output : SlidingWindowFeature or np.ndarray
        Model output, as `SlidingWindowFeature` if `window` is set to
        "sliding" and `np.ndarray` if it is set to "whole".

    Notes
    -----
    If model needs to be warmed up, remember to extend the requested chunk
    with the corresponding amount of time so that it is actually warmed up
    when processing the chunk of interest:
    >>> chunk_of_interest = Segment(10, 15)
    >>> extended_chunk = Segment(10 - warm_up, 15 + warm_up)
    >>> inference.crop(file, extended_chunk).crop(chunk_of_interest, returns_data=False)
    """
    if self.window == "sliding":
        if not isinstance(chunk, Segment):
            start = min(c.start for c in chunk)
            end = max(c.end for c in chunk)
            chunk = Segment(start=start, end=end)
        waveform, sample_rate = self.model.audio.crop(file, chunk, fixed=fixed)
        output = self.slide(waveform, sample_rate)
        frames = output.sliding_window
        shifted_frames = SlidingWindow(
            start=chunk.start, duration=frames.duration, step=frames.step
        )
        return SlidingWindowFeature(output.data, shifted_frames)
    elif self.window == "whole":
        if isinstance(chunk, Segment):
            waveform, sample_rate = self.model.audio.crop(file, chunk, fixed=fixed)
        else:
            waveform = torch.cat(
                [self.model.audio.crop(file, c)[0] for c in chunk], dim=1
            )
        return self.infer(waveform[None])[0]
    else:
        raise NotImplementedError(
            f"Unsupported window type '{self.window}': should be 'sliding' or 'whole'."
        )
def stage(self, stage):
    """
    Here the user will implement anything they require in order to get their
    quest to function as desired. Some possibilities include checking for
    items in the inventory: subscribe to the pub_item_obtained event and
    check the player's inventory for items. Check if a player has stepped
    onto a tile by writing a quest.stage method into the array_map class's
    send_data method. Or, make NPCs say certain things by creating an if
    statement to check if a quest has a certain stage.
    """
    raise NotImplementedError('Please define this method.')
def player_win(self, plyr, enemy): # The player wins """ This method is defined by users of Gilbo. If the player wins battle(), this method is called. Whether they loot the enemy, or gain experience, it must be defined here. """
def player_lose(self, plyr, enemy): # The player loses """ This method is defined by users of Gilbo. If the player loses battle(), this method is called. Whether they lose money and respawn, or get booted out to the last time they saved, it must be defined here. """
def _ismissing(val, islat=True): """Return True if a value is None or out of bounds. This function is used to check for invalid latitude/longitude values. Args: val (numeric): A numeric value. islat (:obj:`bool`): Set to False if checking for longitude values. Returns: :obj:`bool`: True if the value is None, or an out of bounds value. """ if islat: if val is None: return True if math.fabs(val) > 90.: return True else: if val is None: return True if math.fabs(val) > 360.: return True return False
def _context_equal(x, y, ctx): """Return True if both objects are equal based on the provided context. Args: x (numeric): A numeric value. y (numeric): A numeric value. ctx (:class:`decimal.Context`): A decimal Context object. Returns: :obj:`bool`: True if the values are equal based on the provided context, False otherwise. """ if x is not None: if y is None: return False # Note: The float conversion is because these may come in as # numpy.float32 or numpy.float64, which Decimal does not know # how to handle. if (Decimal(float(x)).normalize(ctx) != Decimal(float(y)).normalize(ctx)): return False else: if y is not None: return False return True
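The normalization idea is easy to demonstrate on its own: with a `decimal.Context` of limited precision, values that agree to that many significant digits normalize to the same `Decimal` (a standalone sketch, independent of the helper above):

from decimal import Context, Decimal

ctx = Context(prec=6)  # compare at 6 significant digits

# Equal once rounded to 6 significant digits.
print(Decimal(float(1.0000001)).normalize(ctx) ==
      Decimal(float(1.0000002)).normalize(ctx))  # True

# Already differ within 6 significant digits.
print(Decimal(float(1.001)).normalize(ctx) ==
      Decimal(float(1.002)).normalize(ctx))      # False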
def cartopy_xlim(self, geobounds):
    """Return the x extents in projected coordinates for cartopy.

    Returns:
        :obj:`list`: A pair of [xmin, xmax].

    See Also:
        :mod:`cartopy`, :mod:`matplotlib`
    """
    try:
        _ = len(geobounds)
    except TypeError:
        x_extents = self._cart_extents(geobounds)[0]
    else:
        extents = self._cart_extents(geobounds)
        # object dtype: each cell holds an [xmin, xmax] pair
        x_extents = np.empty(extents.shape, dtype=object)
        for idxs, extent in np.ndenumerate(extents):
            x_extents[idxs] = extent[0]
    return x_extents
def cartopy_ylim(self, geobounds):
    """Return the y extents in projected coordinates for cartopy.

    Returns:
        :obj:`list`: A pair of [ymin, ymax].

    See Also:
        :mod:`cartopy`, :mod:`matplotlib`
    """
    try:
        _ = len(geobounds)
    except TypeError:
        y_extents = self._cart_extents(geobounds)[1]
    else:
        extents = self._cart_extents(geobounds)
        # object dtype: each cell holds a [ymin, ymax] pair
        y_extents = np.empty(extents.shape, dtype=object)
        for idxs, extent in np.ndenumerate(extents):
            y_extents[idxs] = extent[1]
    return y_extents
def proj4(self):
    """Return the PROJ.4 string for the map projection.

    Returns:
        :obj:`str`: A string suitable for use with the PROJ.4 library.

    See Also:
        `PROJ.4 <https://trac.osgeo.org/proj/>`_
    """
    return self._proj4()