Code: string, lengths 103 to 85.9k
Summary: list, lengths 0 to 94
Please provide a description of the function:def tagReportCallback(llrpMsg): global tagReport tags = llrpMsg.msgdict['RO_ACCESS_REPORT']['TagReportData'] if len(tags): logger.info('saw tag(s): %s', pprint.pformat(tags)) else: logger.info('no tags seen') return for tag in tags: tagReport += tag['TagSeenCount'][0] if "OpSpecResult" in tag: result = tag["OpSpecResult"].get("Result") logger.debug("result: %s", result)
[ "Function to run each time the reader reports seeing tags." ]
Please provide a description of the function:def inventory(host, port, time, report_every_n_tags, antennas, tx_power, tari, session, mode_identifier, tag_population, reconnect, tag_filter_mask, impinj_extended_configuration, impinj_search_mode, impinj_reports, impinj_fixed_freq): # XXX band-aid hack to provide many args to _inventory.main Args = namedtuple('Args', ['host', 'port', 'time', 'every_n', 'antennas', 'tx_power', 'tari', 'session', 'population', 'mode_identifier', 'reconnect', 'tag_filter_mask', 'impinj_extended_configuration', 'impinj_search_mode', 'impinj_reports', 'impinj_fixed_freq']) args = Args(host=host, port=port, time=time, every_n=report_every_n_tags, antennas=antennas, tx_power=tx_power, tari=tari, session=session, population=tag_population, mode_identifier=mode_identifier, reconnect=reconnect, tag_filter_mask=tag_filter_mask, impinj_extended_configuration=impinj_extended_configuration, impinj_search_mode=impinj_search_mode, impinj_reports=impinj_reports, impinj_fixed_freq=impinj_fixed_freq) logger.debug('inventory args: %s', args) _inventory.main(args)
[ "Conduct inventory (searching the area around the antennas)." ]
Please provide a description of the function:def read(filename): fname = os.path.join(here, filename) with codecs.open(fname, encoding='utf-8') as f: return f.read()
[ "\n Get the long description from a file.\n " ]
Please provide a description of the function:def deserialize(self): if self.msgbytes is None: raise LLRPError('No message bytes to deserialize.') data = self.msgbytes msgtype, length, msgid = struct.unpack(self.full_hdr_fmt, data[:self.full_hdr_len]) ver = (msgtype >> 10) & BITMASK(3) msgtype = msgtype & BITMASK(10) try: name = Message_Type2Name[msgtype] logger.debug('deserializing %s command', name) decoder = Message_struct[name]['decode'] except KeyError: raise LLRPError('Cannot find decoder for message type ' '{}'.format(msgtype)) body = data[self.full_hdr_len:length] try: self.msgdict = { name: dict(decoder(body)) } self.msgdict[name]['Ver'] = ver self.msgdict[name]['Type'] = msgtype self.msgdict[name]['ID'] = msgid logger.debug('done deserializing %s command', name) except ValueError: logger.exception('Unable to decode body %s, %s', body, decoder(body)) except LLRPError: logger.exception('Problem with %s message format', name) return '' return ''
[ "Turns a sequence of bytes into a message dictionary." ]
Please provide a description of the function:def parseReaderConfig(self, confdict): logger.debug('parseReaderConfig input: %s', confdict) conf = {} for k, v in confdict.items(): if not k.startswith('Parameter'): continue ty = v['Type'] data = v['Data'] vendor = None subtype = None try: vendor, subtype = v['Vendor'], v['Subtype'] except KeyError: pass if ty == 1023: if vendor == 25882 and subtype == 37: tempc = struct.unpack('!H', data)[0] conf.update(temperature=tempc) else: conf[ty] = data return conf
[ "Parse a reader configuration dictionary.\n\n Examples:\n {\n Type: 23,\n Data: b'\\x00'\n }\n {\n Type: 1023,\n Vendor: 25882,\n Subtype: 21,\n Data: b'\\x00'\n }\n " ]
Please provide a description of the function:def parseCapabilities(self, capdict): # check requested antenna set gdc = capdict['GeneralDeviceCapabilities'] max_ant = gdc['MaxNumberOfAntennaSupported'] if max(self.antennas) > max_ant: reqd = ','.join(map(str, self.antennas)) avail = ','.join(map(str, range(1, max_ant + 1))) errmsg = ('Invalid antenna set specified: requested={},' ' available={}; ignoring invalid antennas'.format( reqd, avail)) raise ReaderConfigurationError(errmsg) logger.debug('set antennas: %s', self.antennas) # parse available transmit power entries, set self.tx_power bandcap = capdict['RegulatoryCapabilities']['UHFBandCapabilities'] self.tx_power_table = self.parsePowerTable(bandcap) logger.debug('tx_power_table: %s', self.tx_power_table) self.setTxPower(self.tx_power) # parse list of reader's supported mode identifiers regcap = capdict['RegulatoryCapabilities'] modes = regcap['UHFBandCapabilities']['UHFRFModeTable'] mode_list = [modes[k] for k in sorted(modes.keys(), key=natural_keys)] # select a mode by matching available modes to requested parameters if self.mode_identifier is not None: logger.debug('Setting mode from mode_identifier=%s', self.mode_identifier) try: mode = [mo for mo in mode_list if mo['ModeIdentifier'] == self.mode_identifier][0] self.reader_mode = mode except IndexError: valid_modes = sorted(mo['ModeIdentifier'] for mo in mode_list) errstr = ('Invalid mode_identifier; valid mode_identifiers' ' are {}'.format(valid_modes)) raise ReaderConfigurationError(errstr) # if we're trying to set Tari explicitly, but the selected mode doesn't # support the requested Tari, that's a configuration error. if self.reader_mode and self.tari: if self.reader_mode['MinTari'] < self.tari < self.reader_mode['MaxTari']: logger.debug('Overriding mode Tari %s with requested Tari %s', self.reader_mode['MaxTari'], self.tari) else: errstr = ('Requested Tari {} is incompatible with selected ' 'mode {}'.format(self.tari, self.reader_mode)) logger.info('using reader mode: %s', self.reader_mode)
[ "Parse a capabilities dictionary and adjust instance settings.\n\n At the time this function is called, the user has requested some\n settings (e.g., mode identifier), but we haven't yet asked the reader\n whether those requested settings are within its capabilities. This\n function's job is to parse the reader's capabilities, compare them\n against any requested settings, and raise an error if there are any\n incompatibilities.\n\n Sets the following instance variables:\n - self.antennas (list of antenna numbers, e.g., [1] or [1, 2])\n - self.tx_power_table (list of dBm values)\n - self.reader_mode (dictionary of mode settings, e.g., Tari)\n\n Raises ReaderConfigurationError if the requested settings are not\n within the reader's capabilities.\n " ]
Please provide a description of the function:def handleMessage(self, lmsg): logger.debug('LLRPMessage received in state %s: %s', self.state, lmsg) msgName = lmsg.getName() lmsg.proto = self lmsg.peername = self.peername # call per-message callbacks logger.debug('starting message callbacks for %s', msgName) for fn in self._message_callbacks[msgName]: fn(lmsg) logger.debug('done with message callbacks for %s', msgName) # keepalives can occur at any time if msgName == 'KEEPALIVE': self.send_KEEPALIVE_ACK() return if msgName == 'RO_ACCESS_REPORT' and \ self.state != LLRPClient.STATE_INVENTORYING: logger.debug('ignoring RO_ACCESS_REPORT because not inventorying') return if msgName == 'READER_EVENT_NOTIFICATION' and \ self.state >= LLRPClient.STATE_CONNECTED: logger.debug('Got reader event notification') return logger.debug('in handleMessage(%s), there are %d Deferreds', msgName, len(self._deferreds[msgName])) ####### # LLRP client state machine follows. Beware: gets thorny. Note the # order of the LLRPClient.STATE_* fields. ####### # in DISCONNECTED, CONNECTING, and CONNECTED states, expect only # READER_EVENT_NOTIFICATION messages. if self.state in (LLRPClient.STATE_DISCONNECTED, LLRPClient.STATE_CONNECTING, LLRPClient.STATE_CONNECTED): if msgName != 'READER_EVENT_NOTIFICATION': logger.error('unexpected message %s while connecting', msgName) return if not lmsg.isSuccess(): rend = lmsg.msgdict[msgName]['ReaderEventNotificationData'] try: status = rend['ConnectionAttemptEvent']['Status'] except KeyError: status = '(unknown status)' logger.fatal('Could not start session on reader: %s', status) return self.processDeferreds(msgName, lmsg.isSuccess()) # a Deferred to call when we get GET_READER_CAPABILITIES_RESPONSE d = defer.Deferred() d.addCallback(self._setState_wrapper, LLRPClient.STATE_CONNECTED) d.addErrback(self.panic, 'GET_READER_CAPABILITIES failed') if (self.impinj_search_mode or self.impinj_tag_content_selector or self.impinj_extended_configuration or self.impinj_fixed_frequency_param): caps = defer.Deferred() caps.addCallback(self.send_GET_READER_CAPABILITIES, onCompletion=d) caps.addErrback(self.panic, 'ENABLE_IMPINJ_EXTENSIONS failed') self.send_ENABLE_IMPINJ_EXTENSIONS(onCompletion=caps) else: self.send_GET_READER_CAPABILITIES(self, onCompletion=d) elif self.state == LLRPClient.STATE_SENT_ENABLE_IMPINJ_EXTENSIONS: logger.debug(lmsg) if msgName != 'CUSTOM_MESSAGE': logger.error('unexpected response %s while enabling Impinj' 'extensions', msgName) return if not lmsg.isSuccess(): status = lmsg.msgdict[msgName]['LLRPStatus']['StatusCode'] err = lmsg.msgdict[msgName]['LLRPStatus']['ErrorDescription'] logger.fatal('Error %s enabling Impinj extensions: %s', status, err) return logger.debug('Successfully enabled Impinj extensions') self.processDeferreds(msgName, lmsg.isSuccess()) # in state SENT_GET_CAPABILITIES, expect GET_CAPABILITIES_RESPONSE; # respond to this message by advancing to state CONNECTED. 
elif self.state == LLRPClient.STATE_SENT_GET_CAPABILITIES: if msgName != 'GET_READER_CAPABILITIES_RESPONSE': logger.error('unexpected response %s getting capabilities', msgName) return if not lmsg.isSuccess(): status = lmsg.msgdict[msgName]['LLRPStatus']['StatusCode'] err = lmsg.msgdict[msgName]['LLRPStatus']['ErrorDescription'] logger.fatal('Error %s getting capabilities: %s', status, err) return self.capabilities = \ lmsg.msgdict['GET_READER_CAPABILITIES_RESPONSE'] logger.debug('Capabilities: %s', pprint.pformat(self.capabilities)) try: self.parseCapabilities(self.capabilities) except LLRPError as err: logger.exception('Capabilities mismatch') raise err self.processDeferreds(msgName, lmsg.isSuccess()) d = defer.Deferred() d.addCallback(self._setState_wrapper, LLRPClient.STATE_SENT_GET_CONFIG) d.addErrback(self.panic, 'GET_READER_CONFIG failed') self.send_GET_READER_CONFIG(onCompletion=d) elif self.state == LLRPClient.STATE_SENT_GET_CONFIG: if msgName not in ('GET_READER_CONFIG_RESPONSE', 'DELETE_ACCESSSPEC_RESPONSE', 'DELETE_ROSPEC_RESPONSE'): logger.error('unexpected response %s getting config', msgName) return if not lmsg.isSuccess(): status = lmsg.msgdict[msgName]['LLRPStatus']['StatusCode'] err = lmsg.msgdict[msgName]['LLRPStatus']['ErrorDescription'] logger.fatal('Error %s getting reader config: %s', status, err) return if msgName == 'GET_READER_CONFIG_RESPONSE': config = lmsg.msgdict['GET_READER_CONFIG_RESPONSE'] self.configuration = self.parseReaderConfig(config) logger.debug('Reader configuration: %s', self.configuration) self.processDeferreds(msgName, lmsg.isSuccess()) d = defer.Deferred() d.addCallback(self._setState_wrapper, LLRPClient.STATE_SENT_SET_CONFIG) d.addErrback(self.panic, 'SET_READER_CONFIG failed') self.send_ENABLE_EVENTS_AND_REPORTS() self.send_SET_READER_CONFIG(onCompletion=d) elif self.state == LLRPClient.STATE_SENT_SET_CONFIG: if msgName not in ('SET_READER_CONFIG_RESPONSE', 'GET_READER_CONFIG_RESPONSE', 'DELETE_ACCESSSPEC_RESPONSE'): logger.error('unexpected response %s setting config', msgName) return if not lmsg.isSuccess(): status = lmsg.msgdict[msgName]['LLRPStatus']['StatusCode'] err = lmsg.msgdict[msgName]['LLRPStatus']['ErrorDescription'] logger.fatal('Error %s setting reader config: %s', status, err) return self.processDeferreds(msgName, lmsg.isSuccess()) if self.reset_on_connect: d = self.stopPolitely(disconnect=False) if self.start_inventory: d.addCallback(self.startInventory) elif self.start_inventory: self.startInventory() # in state SENT_ADD_ROSPEC, expect only ADD_ROSPEC_RESPONSE; respond to # favorable ADD_ROSPEC_RESPONSE by enabling the added ROSpec and # advancing to state SENT_ENABLE_ROSPEC. elif self.state == LLRPClient.STATE_SENT_ADD_ROSPEC: if msgName != 'ADD_ROSPEC_RESPONSE': logger.error('unexpected response %s when adding ROSpec', msgName) return if not lmsg.isSuccess(): status = lmsg.msgdict[msgName]['LLRPStatus']['StatusCode'] err = lmsg.msgdict[msgName]['LLRPStatus']['ErrorDescription'] logger.fatal('Error %s adding ROSpec: %s', status, err) return self.processDeferreds(msgName, lmsg.isSuccess()) # in state SENT_ENABLE_ROSPEC, expect only ENABLE_ROSPEC_RESPONSE; # respond to favorable ENABLE_ROSPEC_RESPONSE by starting the enabled # ROSpec and advancing to state SENT_START_ROSPEC. 
elif self.state == LLRPClient.STATE_SENT_ENABLE_ROSPEC: if msgName != 'ENABLE_ROSPEC_RESPONSE': logger.error('unexpected response %s when enabling ROSpec', msgName) return if not lmsg.isSuccess(): status = lmsg.msgdict[msgName]['LLRPStatus']['StatusCode'] err = lmsg.msgdict[msgName]['LLRPStatus']['ErrorDescription'] logger.fatal('Error %s enabling ROSpec: %s', status, err) return self.processDeferreds(msgName, lmsg.isSuccess()) # in state PAUSING, we have sent a DISABLE_ROSPEC, so expect only # DISABLE_ROSPEC_RESPONSE. advance to state PAUSED. elif self.state == LLRPClient.STATE_PAUSING: if msgName != 'DISABLE_ROSPEC_RESPONSE': logger.error('unexpected response %s ' ' when disabling ROSpec', msgName) if not lmsg.isSuccess(): status = lmsg.msgdict[msgName]['LLRPStatus']['StatusCode'] err = lmsg.msgdict[msgName]['LLRPStatus']['ErrorDescription'] logger.error('DISABLE_ROSPEC failed with status %s: %s', status, err) logger.warn('Error %s disabling ROSpec: %s', status, err) self.processDeferreds(msgName, lmsg.isSuccess()) # in state SENT_START_ROSPEC, expect only START_ROSPEC_RESPONSE; # respond to favorable START_ROSPEC_RESPONSE by advancing to state # INVENTORYING. elif self.state == LLRPClient.STATE_SENT_START_ROSPEC: if msgName == 'RO_ACCESS_REPORT': return if msgName == 'READER_EVENT_NOTIFICATION': return if msgName != 'START_ROSPEC_RESPONSE': logger.error('unexpected response %s when starting ROSpec', msgName) if not lmsg.isSuccess(): status = lmsg.msgdict[msgName]['LLRPStatus']['StatusCode'] err = lmsg.msgdict[msgName]['LLRPStatus']['ErrorDescription'] logger.error('START_ROSPEC failed with status %s: %s', status, err) logger.fatal('Error %s starting ROSpec: %s', status, err) return self.processDeferreds(msgName, lmsg.isSuccess()) elif self.state == LLRPClient.STATE_INVENTORYING: if msgName not in ('RO_ACCESS_REPORT', 'READER_EVENT_NOTIFICATION', 'ADD_ACCESSSPEC_RESPONSE', 'ENABLE_ACCESSSPEC_RESPONSE', 'DISABLE_ACCESSSPEC_RESPONSE', 'DELETE_ACCESSSPEC_RESPONSE'): logger.error('unexpected message %s while inventorying', msgName) return self.processDeferreds(msgName, lmsg.isSuccess()) elif self.state == LLRPClient.STATE_SENT_DELETE_ACCESSSPEC: if msgName != 'DELETE_ACCESSSPEC_RESPONSE': logger.error('unexpected response %s when deleting AccessSpec', msgName) self.processDeferreds(msgName, lmsg.isSuccess()) elif self.state == LLRPClient.STATE_SENT_DELETE_ROSPEC: if msgName != 'DELETE_ROSPEC_RESPONSE': logger.error('unexpected response %s when deleting ROSpec', msgName) if lmsg.isSuccess(): if self.disconnecting: self.setState(LLRPClient.STATE_DISCONNECTED) else: self.setState(LLRPClient.STATE_CONNECTED) else: status = lmsg.msgdict[msgName]['LLRPStatus']['StatusCode'] err = lmsg.msgdict[msgName]['LLRPStatus']['ErrorDescription'] logger.error('DELETE_ROSPEC failed with status %s: %s', status, err) self.processDeferreds(msgName, lmsg.isSuccess()) if self.disconnecting: logger.info('disconnecting') self.transport.loseConnection() else: logger.warn('message %s received in unknown state!', msgName) if self._deferreds[msgName]: logger.error('there should NOT be Deferreds left for %s,' ' but there are!', msgName)
[ "Implements the LLRP client state machine." ]
Please provide a description of the function:def startInventory(self, proto=None, force_regen_rospec=False): if self.state == LLRPClient.STATE_INVENTORYING: logger.warn('ignoring startInventory() while already inventorying') return None rospec = self.getROSpec(force_new=force_regen_rospec)['ROSpec'] logger.info('starting inventory') # upside-down chain of callbacks: add, enable, start ROSpec # started_rospec = defer.Deferred() # started_rospec.addCallback(self._setState_wrapper, # LLRPClient.STATE_INVENTORYING) # started_rospec.addErrback(self.panic, 'START_ROSPEC failed') # logger.debug('made started_rospec') enabled_rospec = defer.Deferred() enabled_rospec.addCallback(self._setState_wrapper, LLRPClient.STATE_INVENTORYING) # enabled_rospec.addCallback(self.send_START_ROSPEC, rospec, # onCompletion=started_rospec) enabled_rospec.addErrback(self.panic, 'ENABLE_ROSPEC failed') logger.debug('made enabled_rospec') added_rospec = defer.Deferred() added_rospec.addCallback(self.send_ENABLE_ROSPEC, rospec, onCompletion=enabled_rospec) added_rospec.addErrback(self.panic, 'ADD_ROSPEC failed') logger.debug('made added_rospec') self.send_ADD_ROSPEC(rospec, onCompletion=added_rospec)
[ "Add a ROSpec to the reader and enable it." ]
Please provide a description of the function:def stopPolitely(self, disconnect=False): logger.info('stopping politely') if disconnect: logger.info('will disconnect when stopped') self.disconnecting = True self.sendMessage({ 'DELETE_ACCESSSPEC': { 'Ver': 1, 'Type': 41, 'ID': 0, 'AccessSpecID': 0 # all AccessSpecs }}) self.setState(LLRPClient.STATE_SENT_DELETE_ACCESSSPEC) d = defer.Deferred() d.addCallback(self.stopAllROSpecs) d.addErrback(self.panic, 'DELETE_ACCESSSPEC failed') self._deferreds['DELETE_ACCESSSPEC_RESPONSE'].append(d) return d
[ "Delete all active ROSpecs. Return a Deferred that will be called\n when the DELETE_ROSPEC_RESPONSE comes back." ]
Please provide a description of the function:def parsePowerTable(uhfbandcap): bandtbl = {k: v for k, v in uhfbandcap.items() if k.startswith('TransmitPowerLevelTableEntry')} tx_power_table = [0] * (len(bandtbl) + 1) for k, v in bandtbl.items(): idx = v['Index'] tx_power_table[idx] = int(v['TransmitPowerValue']) / 100.0 return tx_power_table
[ "Parse the transmit power table\n\n @param uhfbandcap: Capability dictionary from\n self.capabilities['RegulatoryCapabilities']['UHFBandCapabilities']\n @return: a list of [0, dBm value, dBm value, ...]\n\n >>> LLRPClient.parsePowerTable({'TransmitPowerLevelTableEntry1': \\\n {'Index': 1, 'TransmitPowerValue': 3225}})\n [0, 32.25]\n >>> LLRPClient.parsePowerTable({})\n [0]\n " ]
Please provide a description of the function:def get_tx_power(self, tx_power): if not self.tx_power_table: logger.warn('get_tx_power(): tx_power_table is empty!') return {} logger.debug('requested tx_power: %s', tx_power) min_power = self.tx_power_table.index(min(self.tx_power_table)) max_power = self.tx_power_table.index(max(self.tx_power_table)) ret = {} for antid, tx_power in tx_power.items(): if tx_power == 0: # tx_power = 0 means max power max_power_dbm = max(self.tx_power_table) tx_power = self.tx_power_table.index(max_power_dbm) ret[antid] = (tx_power, max_power_dbm) try: power_dbm = self.tx_power_table[tx_power] ret[antid] = (tx_power, power_dbm) except IndexError: raise LLRPError('Invalid tx_power for antenna {}: ' 'requested={}, min_available={}, ' 'max_available={}'.format( antid, self.tx_power, min_power, max_power)) return ret
[ "Validates tx_power against self.tx_power_table\n\n @param tx_power: index into the self.tx_power_table list; if tx_power\n is 0 then the max power from self.tx_power_table\n @return: a dict {antenna: (tx_power_index, power_dbm)} from\n self.tx_power_table\n @raise: LLRPError if the requested index is out of range\n " ]
Please provide a description of the function:def setTxPower(self, tx_power): tx_pow_validated = self.get_tx_power(tx_power) logger.debug('tx_pow_validated: %s', tx_pow_validated) needs_update = False for ant, (tx_pow_idx, tx_pow_dbm) in tx_pow_validated.items(): if self.tx_power[ant] != tx_pow_idx: self.tx_power[ant] = tx_pow_idx needs_update = True logger.debug('tx_power for antenna %s: %s (%s dBm)', ant, tx_pow_idx, tx_pow_dbm) if needs_update and self.state == LLRPClient.STATE_INVENTORYING: logger.debug('changing tx power; will stop politely, then resume') d = self.stopPolitely() d.addCallback(self.startInventory, force_regen_rospec=True)
[ "Set the transmission power for one or more antennas.\n\n @param tx_power: index into self.tx_power_table\n " ]
Please provide a description of the function:def pause(self, duration_seconds=0, force=False, force_regen_rospec=False): logger.debug('pause(%s)', duration_seconds) if self.state != LLRPClient.STATE_INVENTORYING: if not force: logger.info('ignoring pause(); not inventorying (state==%s)', self.getStateName(self.state)) return None else: logger.info('forcing pause()') if duration_seconds: logger.info('pausing for %s seconds', duration_seconds) rospec = self.getROSpec(force_new=force_regen_rospec)['ROSpec'] self.sendMessage({ 'DISABLE_ROSPEC': { 'Ver': 1, 'Type': 25, 'ID': 0, 'ROSpecID': rospec['ROSpecID'] }}) self.setState(LLRPClient.STATE_PAUSING) d = defer.Deferred() d.addCallback(self._setState_wrapper, LLRPClient.STATE_PAUSED) d.addErrback(self.complain, 'pause() failed') self._deferreds['DISABLE_ROSPEC_RESPONSE'].append(d) if duration_seconds > 0: startAgain = task.deferLater(reactor, duration_seconds, lambda: None) startAgain.addCallback(lambda _: self.resume()) return d
[ "Pause an inventory operation for a set amount of time." ]
Please provide a description of the function:def sendMessage(self, msg_dict): sent_ids = [] for name in msg_dict: self.last_msg_id += 1 msg_dict[name]['ID'] = self.last_msg_id sent_ids.append((name, self.last_msg_id)) llrp_msg = LLRPMessage(msgdict=msg_dict) assert llrp_msg.msgbytes, "LLRPMessage is empty" self.transport.write(llrp_msg.msgbytes) return sent_ids
[ "Serialize and send a dict LLRP Message\n\n Note: IDs should be modified in original msg_dict as it is a reference.\n That should be ok.\n " ]
Please provide a description of the function:def buildProtocol(self, addr): self.resetDelay() # reset reconnection backoff state clargs = self.client_args.copy() # optionally configure antennas from self.antenna_dict, which looks # like {'10.0.0.1:5084': {'1': 'ant1', '2': 'ant2'}} hostport = '{}:{}'.format(addr.host, addr.port) logger.debug('Building protocol for %s', hostport) if hostport in self.antenna_dict: clargs['antennas'] = [ int(x) for x in self.antenna_dict[hostport].keys()] elif addr.host in self.antenna_dict: clargs['antennas'] = [ int(x) for x in self.antenna_dict[addr.host].keys()] logger.debug('Antennas in buildProtocol: %s', clargs.get('antennas')) logger.debug('%s start_inventory: %s', hostport, clargs.get('start_inventory')) if self.start_first and not self.protocols: # this is the first protocol, so let's start it inventorying clargs['start_inventory'] = True proto = LLRPClient(factory=self, **clargs) # register state-change callbacks with new client for state, cbs in self._state_callbacks.items(): for cb in cbs: proto.addStateCallback(state, cb) # register message callbacks with new client for msg_type, cbs in self._message_callbacks.items(): for cb in cbs: proto.addMessageCallback(msg_type, cb) return proto
[ "Get a new LLRP client protocol object.\n\n Consult self.antenna_dict to look up antennas to use.\n " ]
Please provide a description of the function:def setTxPower(self, tx_power, peername=None): if peername: protocols = [p for p in self.protocols if p.peername[0] == peername] else: protocols = self.protocols for proto in protocols: proto.setTxPower(tx_power)
[ "Set the transmit power on one or all readers\n\n If peername is None, set the transmit power for all readers.\n Otherwise, set it for that specific reader.\n " ]
Please provide a description of the function:def politeShutdown(self): protoDeferreds = [] for proto in self.protocols: protoDeferreds.append(proto.stopPolitely(disconnect=True)) return defer.DeferredList(protoDeferreds)
[ "Stop inventory on all connected readers." ]
Please provide a description of the function:def calculate_check_digit(gtin): '''Given a GTIN (8-14) or SSCC, calculate its appropriate check digit''' reverse_gtin = gtin[::-1] total = 0 count = 0 for char in reverse_gtin: digit = int(char) if count % 2 == 0: digit = digit * 3 total = total + digit count = count + 1 nearest_multiple_of_ten = int(math.ceil(total / 10.0) * 10) return nearest_multiple_of_ten - total
[]
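Worked example: for the UPC-A barcode 036000291452, the first 11 digits give a weighted sum of 58, so the check digit is 60 - 58 = 2:

print(calculate_check_digit("03600029145"))   # -> 2, completing UPC-A 036000291452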
Please provide a description of the function:def parse_sgtin_96(sgtin_96): '''Given a SGTIN-96 hex string, parse each segment. Returns a dictionary of the segments.''' if not sgtin_96: raise Exception('Pass in a value.') if not sgtin_96.startswith("30"): # not a sgtin, not handled raise Exception('Not SGTIN-96.') binary = "{0:020b}".format(int(sgtin_96, 16)).zfill(96) header = int(binary[:8], 2) tag_filter = int(binary[8:11], 2) partition = binary[11:14] partition_value = int(partition, 2) m, l, n, k = SGTIN_96_PARTITION_MAP[partition_value] company_start = 8 + 3 + 3 company_end = company_start + m company_data = int(binary[company_start:company_end], 2) if company_data > pow(10, l): # can't be too large raise Exception('Company value is too large') company_prefix = str(company_data).zfill(l) item_start = company_end item_end = item_start + n item_data = binary[item_start:item_end] item_number = int(item_data, 2) item_reference = str(item_number).zfill(k) serial = int(binary[-38:], 2) return { "header": header, "filter": tag_filter, "partition": partition, "company_prefix": company_prefix, "item_reference": item_reference, "serial": serial }
[]
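Usage sketch with the example EPC from the GS1 Tag Data Standard (urn:epc:tag:sgtin-96:3.0614141.812345.6789); the decoded values below assume SGTIN_96_PARTITION_MAP follows the standard GS1 partition table:

fields = parse_sgtin_96("3074257BF7194E4000001A85")
# -> {'header': 48, 'filter': 3, 'partition': '101',
#     'company_prefix': '0614141', 'item_reference': '812345', 'serial': 6789}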
Please provide a description of the function:def decode_Identification(data): header_len = struct.calcsize('!HHBH') msgtype, msglen, idtype, bytecount = struct.unpack( '!HHBH', data[:header_len]) ret = {} idtypes = ['MAC Address', 'EPC'] try: ret['IDType'] = idtypes[idtype] except IndexError: return {'IDType': b''}, data[msglen:] # the remainder is ID value ret['ReaderID'] = data[header_len:(header_len+bytecount)] return ret, data[msglen:]
[ "Identification parameter (LLRP 1.1 Section 13.2.2)" ]
Please provide a description of the function:def decode_param(data): logger.debug('decode_param data: %r', data) header_len = struct.calcsize('!HH') partype, parlen = struct.unpack('!HH', data[:header_len]) pardata = data[header_len:parlen] logger.debug('decode_param pardata: %r', pardata) ret = { 'Type': partype, } if partype == 1023: vsfmt = '!II' vendor, subtype = struct.unpack(vsfmt, pardata[:struct.calcsize(vsfmt)]) ret['Vendor'] = vendor ret['Subtype'] = subtype ret['Data'] = pardata[struct.calcsize(vsfmt):] else: ret['Data'] = pardata, return ret, data[parlen:]
[ "Decode any parameter to a byte sequence.\n\n :param data: byte sequence representing an LLRP parameter.\n :returns dict, bytes: where dict is {'Type': <decoded type>, 'Data':\n <decoded data>} and bytes is the remaining bytes trailing the bytes we\n could decode.\n " ]
Please provide a description of the function:def download_files(file_list): for _, source_data_file in file_list: sql_gz_name = source_data_file['name'].split('/')[-1] msg = 'Downloading: %s' % (sql_gz_name) log.debug(msg) new_data = objectstore.get_object( handelsregister_conn, source_data_file, 'handelsregister') # save output to file! with open('data/{}'.format(sql_gz_name), 'wb') as outputzip: outputzip.write(new_data)
[ "Download the latest data. " ]
Please provide a description of the function:def get_connection(store_settings: dict={}) -> Connection: store = store_settings if not store_settings: store = make_config_from_env() os_options = { 'tenant_id': store['TENANT_ID'], 'region_name': store['REGION_NAME'], # 'endpoint_type': 'internalURL' } # when we are running in cloudvps we should use internal urls use_internal = os.getenv('OBJECTSTORE_LOCAL', '') if use_internal: os_options['endpoint_type'] = 'internalURL' connection = Connection( authurl=store['AUTHURL'], user=store['USER'], key=store['PASSWORD'], tenant_name=store['TENANT_NAME'], auth_version=store['VERSION'], os_options=os_options ) return connection
[ "\n get an objectsctore connection\n " ]
Please provide a description of the function:def get_object(connection, object_meta_data: dict, dirname: str): return connection.get_object(dirname, object_meta_data['name'])[1]
[ "\n Download object from objectstore.\n object_meta_data is an object retured when\n using 'get_full_container_list'\n " ]
Please provide a description of the function:def put_object( connection, container: str, object_name: str, contents, content_type: str) -> None: connection.put_object( container, object_name, contents=contents, content_type=content_type)
[ "\n Put file to objectstore\n\n container == \"path/in/store\"\n object_name = \"your_file_name.txt\"\n contents=thefiledata (fileobject) open('ourfile', 'rb')\n content_type='csv' / 'application/json' .. etc\n " ]
Please provide a description of the function:def delete_object(connection, container: str, object_meta_data: dict) -> None: connection.delete_object(container, object_meta_data['name'])
[ "\n Delete single object from objectstore\n " ]
Please provide a description of the function:def return_file_objects(connection, container, prefix='database'): options = [] meta_data = objectstore.get_full_container_list( connection, container, prefix='database') env = ENV.upper() for o_info in meta_data: expected_file = f'database.{ENV}' if o_info['name'].startswith(expected_file): dt = dateparser.parse(o_info['last_modified']) now = datetime.datetime.now() delta = now - dt LOG.debug('AGE: %d %s', delta.days, expected_file) options.append((dt, o_info)) options.sort() return options
[ "Given connecton and container find database dumps\n " ]
Please provide a description of the function:def remove_old_dumps(connection, container: str, days=None): if not days: return if days < 20: LOG.error('A minimum of 20 backups is stored') return options = return_file_objects(connection, container) for dt, o_info in options: now = datetime.datetime.now() delta = now - dt if delta.days > days: LOG.info('Deleting %s', o_info['name']) objectstore.delete_object(connection, container, o_info)
[ "Remove dumps older than x days\n " ]
Please provide a description of the function:def download_database(connection, container: str, target: str=""): meta_data = objectstore.get_full_container_list( connection, container, prefix='database') options = return_file_objects(connection, container) for o_info in meta_data: expected_file = f'database.{ENV}' LOG.info(o_info['name']) if o_info['name'].startswith(expected_file): dt = dateparser.parse(o_info['last_modified']) # now = datetime.datetime.now() options.append((dt, o_info)) options.sort() if not options: LOG.error('Dumps missing? ENVIRONMENT wrong? (acceptance / production') LOG.error('Environtment {ENV}') sys.exit(1) newest = options[-1][1] LOG.debug('Downloading: %s', (newest['name'])) target_file = os.path.join(target, expected_file) LOG.info('TARGET: %s', target_file) if os.path.exists(target_file): LOG.info('Already downloaded') return LOG.error('TARGET does not exists downloading...') new_data = objectstore.get_object(connection, newest, container) # save output to file! with open(target_file, 'wb') as outputzip: outputzip.write(new_data)
[ "\n Download database dump\n " ]
Please provide a description of the function:def run(connection): parser = argparse.ArgumentParser(description=) parser.add_argument( 'location', nargs=1, default=f'{DUMPFOLDER}/database.{ENV}.dump', help="Dump file location") parser.add_argument( 'objectstore', nargs=1, default=f'{DUMPFOLDER}/database.{ENV}.dump', help="Dump file objectstore location") parser.add_argument( '--download-db', action='store_true', dest='download', default=False, help='Download db') parser.add_argument( '--upload-db', action='store_true', dest='upload', default=False, help='Upload db') parser.add_argument( '--container', action='store_true', dest='container', default=False, help='Upload db') parser.add_argument( '--days', type=int, nargs=1, dest='days', default=0, help='Days to keep database dumps') args = parser.parse_args() if args.days: LOG.debug('Cleanup old dumps') remove_old_dumps( connection, args.objectstore[0], args.days[0]) elif args.download: download_database( connection, args.objectstore[0], args.location[0]) elif args.upload: upload_database( connection, args.objectstore[0], args.location[0]) else: parser.print_help()
[ "\n Parse arguments and start upload/download\n ", "\n Process database dumps.\n\n Either download of upload a dump file to the objectstore.\n\n downloads the latest dump and uploads with envronment and date\n into given container destination\n " ]
Please provide a description of the function:def remember(self, user_name): ''' Remember the authenticated identity. This method simply delegates to another IIdentifier plugin if configured. ''' log.debug('Repoze OAuth remember') environ = toolkit.request.environ rememberer = self._get_rememberer(environ) identity = {'repoze.who.userid': user_name} headers = rememberer.remember(environ, identity) for header, value in headers: toolkit.response.headers.add(header, value)
[]
Please provide a description of the function:def redirect_from_callback(self): '''Redirect to the callback URL after a successful authentication.''' state = toolkit.request.params.get('state') came_from = get_came_from(state) toolkit.response.status = 302 toolkit.response.location = came_from
[]
Please provide a description of the function:def can_share_folder(self, user, folder): return folder.parent_id is None and folder.author_id == user.id
[ "\n Return True if `user` can share `folder`.\n " ]
Please provide a description of the function:def storage_color(self, user_storage): p = user_storage.percentage if p >= 0 and p < 60: return "success" if p >= 60 and p < 90: return "warning" if p >= 90 and p <= 100: return "danger" raise ValueError("percentage out of range")
[ "\n Return labels indicating amount of storage used.\n " ]
Please provide a description of the function:def folder_created_message(self, request, folder): messages.success(request, _("Folder {} was created".format(folder)))
[ "\n Send messages.success message after successful folder creation.\n " ]
Please provide a description of the function:def document_created_message(self, request, document): messages.success(request, _("Document {} was created".format(document)))
[ "\n Send messages.success message after successful document creation.\n " ]
Please provide a description of the function:def folder_shared_message(self, request, user, folder): messages.success(request, _("Folder {} is now shared with {}".format(folder, user)))
[ "\n Send messages.success message after successful share.\n " ]
Please provide a description of the function:def folder_pre_delete(self, request, folder): for m in folder.members(): if m.__class__ == folder.__class__: self.folder_pre_delete(request, m) m.delete()
[ "\n Perform folder operations prior to deletions. For example, deleting all contents.\n " ]
Please provide a description of the function:def file_upload_to(self, instance, filename): ext = filename.split(".")[-1] filename = "{}.{}".format(uuid.uuid4(), ext) return os.path.join("document", filename)
[ "\n Callable passed to the FileField's upload_to kwarg on Document.file\n " ]
Please provide a description of the function:def for_user(self, user): qs = SharedMemberQuerySet(model=self.model, using=self._db, user=user) qs = qs.filter(Q(author=user) | Q(foldershareduser__user=user)) return qs.distinct() & self.distinct()
[ "\n All folders the given user can do something with.\n " ]
Please provide a description of the function:def _parse_list(cls, args): argparser = ArgumentParser(prog="cluster list") group = argparser.add_mutually_exclusive_group() group.add_argument("--id", dest="cluster_id", help="show cluster with this id") group.add_argument("--label", dest="label", help="show cluster with this label") group.add_argument("--state", dest="state", action="store", choices=['up', 'down', 'pending', 'terminating'], help="list only clusters in the given state") pagination_group = group.add_argument_group() pagination_group.add_argument("--page", dest="page", action="store", type=int, help="page number") pagination_group.add_argument("--per-page", dest="per_page", action="store", type=int, help="number of clusters to be retrieved per page") arguments = argparser.parse_args(args) return vars(arguments)
[ "\n Parse command line arguments to construct a dictionary of cluster\n parameters that can be used to determine which clusters to list.\n\n Args:\n `args`: sequence of arguments\n\n Returns:\n Dictionary that can be used to determine which clusters to list\n " ]
Please provide a description of the function:def list(cls, state=None, page=None, per_page=None): conn = Qubole.agent() params = {} if page: params['page'] = page if per_page: params['per_page'] = per_page if (params.get('page') or params.get('per_page')) and Qubole.version == 'v1.2': log.warn("Pagination is not supported with API v1.2. Fetching all clusters.") params = None if not params else params cluster_list = conn.get(cls.rest_entity_path, params=params) if state is None: return cluster_list elif state is not None: result = [] if Qubole.version == 'v1.2': for cluster in cluster_list: if state.lower() == cluster['cluster']['state'].lower(): result.append(cluster) elif Qubole.version == 'v1.3': cluster_list = cluster_list['clusters'] for cluster in cluster_list: if state.lower() == cluster['state'].lower(): result.append(cluster) return result
[ "\n List existing clusters present in your account.\n\n Kwargs:\n `state`: list only those clusters which are in this state\n\n Returns:\n List of clusters satisfying the given criteria\n " ]
Please provide a description of the function:def show(cls, cluster_id_label): conn = Qubole.agent() return conn.get(cls.element_path(cluster_id_label))
[ "\n Show information about the cluster with id/label `cluster_id_label`.\n " ]
Please provide a description of the function:def status(cls, cluster_id_label): conn = Qubole.agent(version=Cluster.api_version) return conn.get(cls.element_path(cluster_id_label) + "/state")
[ "\n Show the status of the cluster with id/label `cluster_id_label`.\n " ]
Please provide a description of the function:def master(cls, cluster_id_label): cluster_status = cls.status(cluster_id_label) if cluster_status.get("state") == 'UP': return list(filter(lambda x: x["role"] == "master", cluster_status.get("nodes")))[0] else: return cluster_status
[ "\n Show the details of the master of the cluster with id/label `cluster_id_label`.\n " ]
Please provide a description of the function:def start(cls, cluster_id_label, api_version=None): conn = Qubole.agent(version=api_version) data = {"state": "start"} return conn.put(cls.element_path(cluster_id_label) + "/state", data)
[ "\n Start the cluster with id/label `cluster_id_label`.\n " ]
Please provide a description of the function:def terminate(cls, cluster_id_label): conn = Qubole.agent(version=Cluster.api_version) data = {"state": "terminate"} return conn.put(cls.element_path(cluster_id_label) + "/state", data)
[ "\n Terminate the cluster with id/label `cluster_id_label`.\n " ]
Please provide a description of the function:def _parse_create_update(cls, args, action, api_version): argparser = ArgumentParser(prog="cluster %s" % action) create_required = False label_required = False if action == "create": create_required = True elif action == "update": argparser.add_argument("cluster_id_label", help="id/label of the cluster to update") elif action == "clone": argparser.add_argument("cluster_id_label", help="id/label of the cluster to update") label_required = True argparser.add_argument("--label", dest="label", nargs="+", required=(create_required or label_required), help="list of labels for the cluster" + " (atleast one label is required)") ec2_group = argparser.add_argument_group("ec2 settings") ec2_group.add_argument("--access-key-id", dest="aws_access_key_id", help="access key id for customer's aws" + " account. This is required while" + " creating the cluster",) ec2_group.add_argument("--secret-access-key", dest="aws_secret_access_key", help="secret access key for customer's aws" + " account. This is required while" + " creating the cluster",) ec2_group.add_argument("--aws-region", dest="aws_region", choices=["us-east-1", "us-west-2", "ap-northeast-1", "sa-east-1", "eu-west-1", "ap-southeast-1", "us-west-1"], help="aws region to create the cluster in",) ec2_group.add_argument("--aws-availability-zone", dest="aws_availability_zone", help="availability zone to" + " create the cluster in",) ec2_group.add_argument("--subnet-id", dest="subnet_id", help="subnet to create the cluster in",) ec2_group.add_argument("--vpc-id", dest="vpc_id", help="vpc to create the cluster in",) ec2_group.add_argument("--master-elastic-ip", dest="master_elastic_ip", help="elastic ip to attach to master",) ec2_group.add_argument("--bastion-node-public-dns", dest="bastion_node_public_dns", help="public dns name of the bastion node. 
Required only if cluster is in private subnet of a EC2-VPC",) ec2_group.add_argument("--role-instance-profile", dest="role_instance_profile", help="IAM Role instance profile to attach on cluster",) hadoop_group = argparser.add_argument_group("hadoop settings") node_config_group = argparser.add_argument_group("node configuration") if (api_version >= 1.3) else hadoop_group node_config_group.add_argument("--master-instance-type", dest="master_instance_type", help="instance type to use for the hadoop" + " master node",) node_config_group.add_argument("--slave-instance-type", dest="slave_instance_type", help="instance type to use for the hadoop" + " slave nodes",) node_config_group.add_argument("--initial-nodes", dest="initial_nodes", type=int, help="number of nodes to start the" + " cluster with",) node_config_group.add_argument("--max-nodes", dest="max_nodes", type=int, help="maximum number of nodes the cluster" + " may be auto-scaled up to") node_config_group.add_argument("--slave-request-type", dest="slave_request_type", choices=["ondemand", "spot", "hybrid", "spotblock"], help="purchasing option for slave instaces",) node_config_group.add_argument("--root-volume-size", dest="root_volume_size", type=int, help="size of root volume in GB") hadoop_group.add_argument("--custom-config", dest="custom_config_file", help="location of file containg custom" + " hadoop configuration overrides") hadoop_group.add_argument("--use-hbase", dest="use_hbase", action="store_true", default=None, help="Use hbase on this cluster",) hadoop_group.add_argument("--is-ha", dest="is_ha", action="store_true", default=None, help="Enable HA config for cluster") if api_version >= 1.3: qubole_placement_policy_group = hadoop_group.add_mutually_exclusive_group() qubole_placement_policy_group.add_argument("--use-qubole-placement-policy", dest="use_qubole_placement_policy", action="store_true", default=None, help="Use Qubole Block Placement policy" + " for clusters with spot nodes",) qubole_placement_policy_group.add_argument("--no-use-qubole-placement-policy", dest="use_qubole_placement_policy", action="store_false", default=None, help="Do not use Qubole Block Placement policy" + " for clusters with spot nodes",) fallback_to_ondemand_group = node_config_group.add_mutually_exclusive_group() fallback_to_ondemand_group.add_argument("--fallback-to-ondemand", dest="fallback_to_ondemand", action="store_true", default=None, help="Fallback to on-demand nodes if spot nodes" + " could not be obtained. Valid only if slave_request_type is spot",) fallback_to_ondemand_group.add_argument("--no-fallback-to-ondemand", dest="fallback_to_ondemand", action="store_false", default=None, help="Dont Fallback to on-demand nodes if spot nodes" + " could not be obtained. 
Valid only if slave_request_type is spot",) node_cooldown_period_group = argparser.add_argument_group("node cooldown period settings") node_cooldown_period_group.add_argument("--node-base-cooldown-period", dest="node_base_cooldown_period", type=int, help="Cooldown period for on-demand nodes" + " unit: minutes") node_cooldown_period_group.add_argument("--node-spot-cooldown-period", dest="node_spot_cooldown_period", type=int, help="Cooldown period for spot nodes" + " unit: minutes") ebs_volume_group = argparser.add_argument_group("ebs volume settings") ebs_volume_group.add_argument("--ebs-volume-count", dest="ebs_volume_count", type=int, help="Number of EBS volumes to attach to" + " each instance of the cluster",) ebs_volume_group.add_argument("--ebs-volume-type", dest="ebs_volume_type", choices=["standard", "gp2"], help=" of the EBS volume. Valid values are " + "'standard' (magnetic) and 'gp2' (ssd).",) ebs_volume_group.add_argument("--ebs-volume-size", dest="ebs_volume_size", type=int, help="Size of each EBS volume, in GB",) enable_rubix_group = hadoop_group.add_mutually_exclusive_group() enable_rubix_group.add_argument("--enable-rubix", dest="enable_rubix", action="store_true", default=None, help="Enable rubix for cluster", ) enable_rubix_group.add_argument("--no-enable-rubix", dest="enable_rubix", action="store_false", default=None, help="Do not enable rubix for cluster", ) hadoop2 = hadoop_group.add_mutually_exclusive_group() hadoop2.add_argument("--use-hadoop2", dest="use_hadoop2", action="store_true", default=None, help="Use hadoop2 instead of hadoop1") hadoop2.add_argument("--use-hadoop1", dest="use_hadoop2", action="store_false", default=None, help="Use hadoop1 instead of hadoop2. This is the default.") hadoop2.add_argument("--use-spark", dest="use_spark", action="store_true", default=None, help="Turn on spark for this cluster") spot_group = argparser.add_argument_group("spot instance settings" + " (valid only when slave-request-type is hybrid or spot)") spot_group.add_argument("--maximum-bid-price-percentage", dest="maximum_bid_price_percentage", type=float, help="maximum value to bid for spot instances" + " expressed as a percentage of the base" + " price for the slave node instance type",) spot_group.add_argument("--timeout-for-spot-request", dest="timeout_for_request", type=int, help="timeout for a spot instance request" + " unit: minutes") spot_group.add_argument("--maximum-spot-instance-percentage", dest="maximum_spot_instance_percentage", type=int, help="maximum percentage of instances that may" + " be purchased from the aws spot market," + " valid only when slave-request-type" + " is 'hybrid'",) stable_spot_group = argparser.add_argument_group("stable spot instance settings") stable_spot_group.add_argument("--stable-maximum-bid-price-percentage", dest="stable_maximum_bid_price_percentage", type=float, help="maximum value to bid for stable node spot instances" + " expressed as a percentage of the base" + " price for the master and slave node instance types",) stable_spot_group.add_argument("--stable-timeout-for-spot-request", dest="stable_timeout_for_request", type=int, help="timeout for a stable node spot instance request" + " unit: minutes") stable_spot_group.add_argument("--stable-allow-fallback", dest="stable_allow_fallback", default=None, type=str2bool, help="whether to fallback to on-demand instances for stable nodes" + " if spot instances aren't available") spot_block_group = argparser.add_argument_group("spot block settings") 
spot_block_group.add_argument("--spot-block-duration", dest="spot_block_duration", type=int, help="spot block duration" + " unit: minutes") fairscheduler_group = argparser.add_argument_group( "fairscheduler configuration options") fairscheduler_group.add_argument("--fairscheduler-config-xml", dest="fairscheduler_config_xml_file", help="location for file containing" + " xml with custom configuration" + " for the fairscheduler",) fairscheduler_group.add_argument("--fairscheduler-default-pool", dest="default_pool", help="default pool for the" + " fairscheduler",) security_group = argparser.add_argument_group("security setttings") ephemerals = security_group.add_mutually_exclusive_group() ephemerals.add_argument("--encrypted-ephemerals", dest="encrypted_ephemerals", action="store_true", default=None, help="encrypt the ephemeral drives on" + " the instance",) ephemerals.add_argument("--no-encrypted-ephemerals", dest="encrypted_ephemerals", action="store_false", default=None, help="don't encrypt the ephemeral drives on" + " the instance",) security_group.add_argument("--customer-ssh-key", dest="customer_ssh_key_file", help="location for ssh key to use to" + " login to the instance") security_group.add_argument("--persistent-security-group", dest="persistent_security_group", help="a security group to associate with each" + " node of the cluster. Typically used" + " to provide access to external hosts") presto_group = argparser.add_argument_group("presto settings") enabling_presto = presto_group.add_mutually_exclusive_group() enabling_presto.add_argument("--enable-presto", dest="enable_presto", action="store_true", default=None, help="Enable presto for this cluster",) enabling_presto.add_argument("--disable-presto", dest="enable_presto", action="store_false", default=None, help="Disable presto for this cluster",) presto_group.add_argument("--presto-custom-config", dest="presto_custom_config_file", help="location of file containg custom" + " presto configuration overrides") termination = argparser.add_mutually_exclusive_group() termination.add_argument("--disallow-cluster-termination", dest="disallow_cluster_termination", action="store_true", default=None, help="don't auto-terminate idle clusters," + " use this with extreme caution",) termination.add_argument("--allow-cluster-termination", dest="disallow_cluster_termination", action="store_false", default=None, help="auto-terminate idle clusters,") ganglia = argparser.add_mutually_exclusive_group() ganglia.add_argument("--enable-ganglia-monitoring", dest="enable_ganglia_monitoring", action="store_true", default=None, help="enable ganglia monitoring for the" + " cluster",) ganglia.add_argument("--disable-ganglia-monitoring", dest="enable_ganglia_monitoring", action="store_false", default=None, help="disable ganglia monitoring for the" + " cluster",) argparser.add_argument("--node-bootstrap-file", dest="node_bootstrap_file", help=,) argparser.add_argument("--custom-ec2-tags", dest="custom_ec2_tags", help=,) env_group = argparser.add_argument_group("environment settings") env_group.add_argument("--env-name", dest="env_name", default=None, help="name of Python and R environment") env_group.add_argument("--python-version", dest="python_version", default=None, help="version of Python in environment") env_group.add_argument("--r-version", dest="r_version", default=None, help="version of R in environment") arguments = argparser.parse_args(args) return arguments
[ "\n Parse command line arguments to determine cluster parameters that can\n be used to create or update a cluster.\n\n Args:\n `args`: sequence of arguments\n\n `action`: \"create\", \"update\" or \"clone\"\n\n Returns:\n Object that contains cluster parameters\n ", "name of the node bootstrap file for this cluster. It\n should be in stored in S3 at\n <account-default-location>/scripts/hadoop/NODE_BOOTSTRAP_FILE\n ", "Custom ec2 tags to be set on all instances\n of the cluster. Specified as JSON object (key-value pairs)\n e.g. --custom-ec2-tags '{\"key1\":\"value1\", \"key2\":\"value2\"}'\n " ]
Please provide a description of the function:def _parse_cluster_manage_command(cls, args, action): argparser = ArgumentParser(prog="cluster_manage_command") group = argparser.add_mutually_exclusive_group(required=True) group.add_argument("--id", dest="cluster_id", help="execute on cluster with this id") group.add_argument("--label", dest="label", help="execute on cluster with this label") if action == "remove" or action == "update": argparser.add_argument("--private_dns", help="the private_dns of the machine to be updated/removed", required=True) if action == "update": argparser.add_argument("--command", help="the update command to be executed", required=True, choices=["replace"]) arguments = argparser.parse_args(args) return arguments
[ "\n Parse command line arguments for cluster manage commands.\n " ]
Please provide a description of the function:def _parse_reassign_label(cls, args): argparser = ArgumentParser(prog="cluster reassign_label") argparser.add_argument("destination_cluster", metavar="destination_cluster_id_label", help="id/label of the cluster to move the label to") argparser.add_argument("label", help="label to be moved from the source cluster") arguments = argparser.parse_args(args) return arguments
[ "\n Parse command line arguments for reassigning label.\n " ]
Please provide a description of the function:def reassign_label(cls, destination_cluster, label): conn = Qubole.agent(version=Cluster.api_version) data = { "destination_cluster": destination_cluster, "label": label } return conn.put(cls.rest_entity_path + "/reassign-label", data)
[ "\n Reassign a label from one cluster to another.\n\n Args:\n `destination_cluster`: id/label of the cluster to move the label to\n\n `label`: label to be moved from the source cluster\n " ]
Please provide a description of the function:def delete(cls, cluster_id_label): conn = Qubole.agent(version=Cluster.api_version) return conn.delete(cls.element_path(cluster_id_label))
[ "\n Delete the cluster with id/label `cluster_id_label`.\n " ]
Please provide a description of the function:def _parse_snapshot_restore_command(cls, args, action): argparser = ArgumentParser(prog="cluster %s" % action) group = argparser.add_mutually_exclusive_group(required=True) group.add_argument("--id", dest="cluster_id", help="execute on cluster with this id") group.add_argument("--label", dest="label", help="execute on cluster with this label") argparser.add_argument("--s3_location", help="s3_location where backup is stored", required=True) if action == "snapshot": argparser.add_argument("--backup_type", help="backup_type: full/incremental, default is full") elif action == "restore_point": argparser.add_argument("--backup_id", help="back_id from which restoration will be done", required=True) argparser.add_argument("--table_names", help="table(s) which are to be restored", required=True) argparser.add_argument("--no-overwrite", action="store_false", help="With this option, restore overwrites to the existing table if theres any in restore target") argparser.add_argument("--no-automatic", action="store_false", help="With this option, all the dependencies are automatically restored together with this backup image following the correct order") arguments = argparser.parse_args(args) return arguments
[ "\n Parse command line arguments for snapshot command.\n " ]
Please provide a description of the function:def _parse_get_snapshot_schedule(cls, args): argparser = ArgumentParser(prog="cluster snapshot_schedule") group = argparser.add_mutually_exclusive_group(required=True) group.add_argument("--id", dest="cluster_id", help="execute on cluster with this id") group.add_argument("--label", dest="label", help="execute on cluster with this label") arguments = argparser.parse_args(args) return arguments
[ "\n Parse command line arguments for updating hbase snapshot schedule or to get details.\n " ]
Please provide a description of the function:def _parse_update_snapshot_schedule(cls, args): argparser = ArgumentParser(prog="cluster snapshot_schedule") group = argparser.add_mutually_exclusive_group(required=True) group.add_argument("--id", dest="cluster_id", help="execute on cluster with this id") group.add_argument("--label", dest="label", help="execute on cluster with this label") argparser.add_argument("--frequency-num", help="frequency number") argparser.add_argument("--frequency-unit", help="frequency unit") argparser.add_argument("--s3-location", help="s3_location about where to store snapshots") argparser.add_argument("--status", help="status of periodic job you want to change to", choices = ["RUNNING", "SUSPENDED"]) arguments = argparser.parse_args(args) return arguments
[ "\n Parse command line arguments for updating hbase snapshot schedule or to get details.\n " ]
Please provide a description of the function:def snapshot(cls, cluster_id_label, s3_location, backup_type): conn = Qubole.agent(version=Cluster.api_version) parameters = {} parameters['s3_location'] = s3_location if backup_type: parameters['backup_type'] = backup_type return conn.post(cls.element_path(cluster_id_label) + "/snapshots", data=parameters)
[ "\n Create hbase snapshot full/incremental\n " ]
Please provide a description of the function:def restore_point(cls, cluster_id_label, s3_location, backup_id, table_names, overwrite=True, automatic=True): conn = Qubole.agent(version=Cluster.api_version) parameters = {} parameters['s3_location'] = s3_location parameters['backup_id'] = backup_id parameters['table_names'] = table_names parameters['overwrite'] = overwrite parameters['automatic'] = automatic return conn.post(cls.element_path(cluster_id_label) + "/restore_point", data=parameters)
[ "\n Restoring cluster from a given hbase snapshot id\n " ]
Please provide a description of the function:def update_snapshot_schedule(cls, cluster_id_label, s3_location=None, frequency_unit=None, frequency_num=None, status=None): conn = Qubole.agent(version=Cluster.api_version) data = {} if s3_location is not None: data["s3_location"] = s3_location if frequency_unit is not None: data["frequency_unit"] = frequency_unit if frequency_num is not None: data["frequency_num"] = frequency_num if status is not None: data["status"] = status return conn.put(cls.element_path(cluster_id_label) + "/snapshot_schedule", data)
[ "\n Update for snapshot schedule\n " ]
Please provide a description of the function:def add_node(cls, cluster_id_label, parameters=None): conn = Qubole.agent(version=Cluster.api_version) parameters = {} if not parameters else parameters return conn.post(cls.element_path(cluster_id_label) + "/nodes", data={"parameters" : parameters})
[ "\n Add a node to an existing cluster\n " ]
Please provide a description of the function:def remove_node(cls, cluster_id_label, private_dns, parameters=None): conn = Qubole.agent(version=Cluster.api_version) parameters = {} if not parameters else parameters data = {"private_dns" : private_dns, "parameters" : parameters} return conn.delete(cls.element_path(cluster_id_label) + "/nodes", data)
[ "\n Add a node to an existing cluster\n " ]
Please provide a description of the function:def update_node(cls, cluster_id_label, command, private_dns, parameters=None): conn = Qubole.agent(version=Cluster.api_version) parameters = {} if not parameters else parameters data = {"command" : command, "private_dns" : private_dns, "parameters" : parameters} return conn.put(cls.element_path(cluster_id_label) + "/nodes", data)
[ "\n Add a node to an existing cluster\n " ]
Please provide a description of the function:def set_ec2_settings(self, aws_region=None, aws_availability_zone=None, vpc_id=None, subnet_id=None, master_elastic_ip=None, role_instance_profile=None, bastion_node_public_dns=None): self.ec2_settings['aws_region'] = aws_region self.ec2_settings['aws_preferred_availability_zone'] = aws_availability_zone self.ec2_settings['vpc_id'] = vpc_id self.ec2_settings['subnet_id'] = subnet_id self.ec2_settings['role_instance_profile'] = role_instance_profile self.ec2_settings['master_elastic_ip'] = master_elastic_ip self.ec2_settings['bastion_node_public_dns'] = bastion_node_public_dns
[ "\n Kwargs:\n\n `aws_region`: AWS region to create the cluster in.\n\n `aws_availability_zone`: The availability zone to create the cluster\n in.\n\n `vpc_id`: The vpc to create the cluster in.\n\n `subnet_id`: The subnet to create the cluster in.\n\n `bastion_node_public_dns`: Public dns name of the bastion host. Required only if\n cluster is in private subnet.\n " ]
Please provide a description of the function:def set_hadoop_settings(self, master_instance_type=None, slave_instance_type=None, initial_nodes=None, max_nodes=None, custom_config=None, slave_request_type=None, use_hbase=None, custom_ec2_tags=None, use_hadoop2=None, use_spark=None, is_ha=None): self.hadoop_settings['master_instance_type'] = master_instance_type self.hadoop_settings['slave_instance_type'] = slave_instance_type self.hadoop_settings['initial_nodes'] = initial_nodes self.hadoop_settings['max_nodes'] = max_nodes self.hadoop_settings['custom_config'] = custom_config self.hadoop_settings['slave_request_type'] = slave_request_type self.hadoop_settings['use_hbase'] = use_hbase self.hadoop_settings['use_hadoop2'] = use_hadoop2 self.hadoop_settings['use_spark'] = use_spark self.hadoop_settings['is_ha'] = is_ha if custom_ec2_tags and custom_ec2_tags.strip(): try: self.hadoop_settings['custom_ec2_tags'] = json.loads(custom_ec2_tags.strip()) except Exception as e: raise Exception("Invalid JSON string for custom ec2 tags: %s" % e.message)
[ "\n Kwargs:\n\n `master_instance_type`: The instance type to use for the Hadoop master\n node.\n\n `slave_instance_type`: The instance type to use for the Hadoop slave\n nodes.\n\n `initial_nodes`: Number of nodes to start the cluster with.\n\n `max_nodes`: Maximum number of nodes the cluster may be auto-scaled up\n to.\n\n `custom_config`: Custom Hadoop configuration overrides.\n\n `slave_request_type`: Purchasing option for slave instances.\n Valid values: \"ondemand\", \"hybrid\", \"spot\".\n\n `use_hbase`: Start hbase daemons on the cluster. Uses Hadoop2\n\n `use_hadoop2`: Use hadoop2 in this cluster\n\n `use_spark`: Use spark in this cluster\n\n `is_ha` : enable HA config for cluster\n\n " ]
Please provide a description of the function:def set_spot_instance_settings(self, maximum_bid_price_percentage=None, timeout_for_request=None, maximum_spot_instance_percentage=None): self.hadoop_settings['spot_instance_settings'] = { 'maximum_bid_price_percentage': maximum_bid_price_percentage, 'timeout_for_request': timeout_for_request, 'maximum_spot_instance_percentage': maximum_spot_instance_percentage}
[ "\n Purchase options for spot instances. Valid only when\n `slave_request_type` is hybrid or spot.\n\n `maximum_bid_price_percentage`: Maximum value to bid for spot\n instances, expressed as a percentage of the base price for the\n slave node instance type.\n\n `timeout_for_request`: Timeout for a spot instance request (Unit:\n minutes)\n\n `maximum_spot_instance_percentage`: Maximum percentage of instances\n that may be purchased from the AWS Spot market. Valid only when\n slave_request_type is \"hybrid\".\n " ]
Please provide a description of the function:def set_stable_spot_instance_settings(self, maximum_bid_price_percentage=None, timeout_for_request=None, allow_fallback=True): self.hadoop_settings['stable_spot_instance_settings'] = { 'maximum_bid_price_percentage': maximum_bid_price_percentage, 'timeout_for_request': timeout_for_request, 'allow_fallback': allow_fallback}
[ "\n Purchase options for stable spot instances.\n\n `maximum_bid_price_percentage`: Maximum value to bid for stable node spot\n instances, expressed as a percentage of the base price\n (applies to both master and slave nodes).\n\n `timeout_for_request`: Timeout for a stable node spot instance request (Unit:\n minutes)\n\n `allow_fallback`: Whether to fallback to on-demand instances for\n stable nodes if spot instances are not available\n " ]
Please provide a description of the function:def set_security_settings(self, encrypted_ephemerals=None, customer_ssh_key=None, persistent_security_group=None): self.security_settings['encrypted_ephemerals'] = encrypted_ephemerals self.security_settings['customer_ssh_key'] = customer_ssh_key self.security_settings['persistent_security_group'] = persistent_security_group
[ "\n Kwargs:\n\n `encrypted_ephemerals`: Encrypt the ephemeral drives on the instance.\n\n `customer_ssh_key`: SSH key to use to login to the instances.\n " ]
Please provide a description of the function:def set_presto_settings(self, enable_presto=None, presto_custom_config=None): self.presto_settings['enable_presto'] = enable_presto self.presto_settings['custom_config'] = presto_custom_config
[ "\n Kwargs:\n\n `enable_presto`: Enable Presto on the cluster.\n\n `presto_custom_config`: Custom Presto configuration overrides.\n " ]
Please provide a description of the function:def set_cluster_info(self, aws_access_key_id=None, aws_secret_access_key=None, aws_region=None, aws_availability_zone=None, vpc_id=None, subnet_id=None, master_elastic_ip=None, disallow_cluster_termination=None, enable_ganglia_monitoring=None, node_bootstrap_file=None, master_instance_type=None, slave_instance_type=None, initial_nodes=None, max_nodes=None, slave_request_type=None, fallback_to_ondemand=None, node_base_cooldown_period=None, node_spot_cooldown_period=None, custom_config=None, use_hbase=None, custom_ec2_tags=None, use_hadoop2=None, use_spark=None, use_qubole_placement_policy=None, maximum_bid_price_percentage=None, timeout_for_request=None, maximum_spot_instance_percentage=None, stable_maximum_bid_price_percentage=None, stable_timeout_for_request=None, stable_allow_fallback=True, spot_block_duration=None, ebs_volume_count=None, ebs_volume_type=None, ebs_volume_size=None, root_volume_size=None, fairscheduler_config_xml=None, default_pool=None, encrypted_ephemerals=None, ssh_public_key=None, persistent_security_group=None, enable_presto=None, bastion_node_public_dns=None, role_instance_profile=None, presto_custom_config=None, is_ha=None, env_name=None, python_version=None, r_version=None, enable_rubix=None): self.disallow_cluster_termination = disallow_cluster_termination self.enable_ganglia_monitoring = enable_ganglia_monitoring self.node_bootstrap_file = node_bootstrap_file self.set_node_configuration(master_instance_type, slave_instance_type, initial_nodes, max_nodes, slave_request_type, fallback_to_ondemand, custom_ec2_tags, node_base_cooldown_period, node_spot_cooldown_period, root_volume_size) self.set_ec2_settings(aws_access_key_id, aws_secret_access_key, aws_region, aws_availability_zone, vpc_id, subnet_id, master_elastic_ip, bastion_node_public_dns, role_instance_profile) self.set_hadoop_settings(custom_config, use_hbase, use_hadoop2, use_spark, use_qubole_placement_policy, is_ha, enable_rubix) self.set_spot_instance_settings(maximum_bid_price_percentage, timeout_for_request, maximum_spot_instance_percentage) self.set_stable_spot_instance_settings(stable_maximum_bid_price_percentage, stable_timeout_for_request, stable_allow_fallback) self.set_spot_block_settings(spot_block_duration) self.set_ebs_volume_settings(ebs_volume_count, ebs_volume_type, ebs_volume_size) self.set_fairscheduler_settings(fairscheduler_config_xml, default_pool) self.set_security_settings(encrypted_ephemerals, ssh_public_key, persistent_security_group) self.set_presto_settings(enable_presto, presto_custom_config) self.set_env_settings(env_name, python_version, r_version)
[ "\n Kwargs:\n\n `aws_access_key_id`: The access key id for customer's aws account. This\n is required for creating the cluster.\n\n `aws_secret_access_key`: The secret access key for customer's aws\n account. This is required for creating the cluster.\n\n `aws_region`: AWS region to create the cluster in.\n\n `aws_availability_zone`: The availability zone to create the cluster\n in.\n\n `vpc_id`: The vpc to create the cluster in.\n\n `subnet_id`: The subnet to create the cluster in.\n\n `master_elastic_ip`: Elastic IP to attach to master node\n\n `disallow_cluster_termination`: Set this to True if you don't want\n qubole to auto-terminate idle clusters. Use this option with\n extreme caution.\n\n `enable_ganglia_monitoring`: Set this to True if you want to enable\n ganglia monitoring for the cluster.\n\n `node_bootstrap_file`: name of the node bootstrap file for this\n cluster. It should be in stored in S3 at\n <your-default-location>/scripts/hadoop/\n\n `master_instance_type`: The instance type to use for the Hadoop master\n node.\n\n `slave_instance_type`: The instance type to use for the Hadoop slave\n nodes.\n\n `initial_nodes`: Number of nodes to start the cluster with.\n\n `max_nodes`: Maximum number of nodes the cluster may be auto-scaled up\n to.\n\n `slave_request_type`: Purchasing option for slave instances.\n Valid values: \"ondemand\", \"hybrid\", \"spot\".\n\n `fallback_to_ondemand`: Fallback to on-demand nodes if spot nodes could not be\n obtained. Valid only if slave_request_type is 'spot'.\n\n `node_base_cooldown_period`: Time for which an on-demand node waits before termination (Unit: minutes)\n\n `node_spot_cooldown_period`: Time for which a spot node waits before termination (Unit: minutes)\n\n `custom_config`: Custom Hadoop configuration overrides.\n\n `use_hbase`: Start hbase daemons on the cluster. Uses Hadoop2\n\n `use_hadoop2`: Use hadoop2 in this cluster\n\n `use_spark`: Use spark in this cluster\n\n `use_qubole_placement_policy`: Use Qubole Block Placement policy for \n clusters with spot nodes.\n\n `maximum_bid_price_percentage`: ( Valid only when `slave_request_type` \n is hybrid or spot.) Maximum value to bid for spot\n instances, expressed as a percentage of the base price \n for the slave node instance type.\n\n `timeout_for_request`: Timeout for a spot instance request (Unit:\n minutes)\n\n `maximum_spot_instance_percentage`: Maximum percentage of instances\n that may be purchased from the AWS Spot market. Valid only when\n slave_request_type is \"hybrid\".\n\n `stable_maximum_bid_price_percentage`: Maximum value to bid for stable node spot\n instances, expressed as a percentage of the base price\n (applies to both master and slave nodes).\n\n `stable_timeout_for_request`: Timeout for a stable node spot instance request (Unit:\n minutes)\n\n `stable_allow_fallback`: Whether to fallback to on-demand instances for\n stable nodes if spot instances are not available\n\n `spot_block_duration`: Time for which the spot block instance is provisioned (Unit:\n minutes)\n\n `ebs_volume_count`: Number of EBS volumes to attach \n to each instance of the cluster.\n\n `ebs_volume_type`: Type of the EBS volume. Valid \n values are 'standard' (magnetic) and 'ssd'.\n\n `ebs_volume_size`: Size of each EBS volume, in GB. 
\n\n `root_volume_size`: Size of root volume, in GB.\n\n `fairscheduler_config_xml`: XML string with custom configuration\n parameters for the fair scheduler.\n\n `default_pool`: The default pool for the fair scheduler.\n\n `encrypted_ephemerals`: Encrypt the ephemeral drives on the instance.\n\n `ssh_public_key`: SSH key to use to login to the instances.\n\n `persistent_security_group`: Comma-separated list of persistent \n security groups for the cluster.\n\n `enable_presto`: Enable Presto on the cluster.\n\n `presto_custom_config`: Custom Presto configuration overrides.\n\n `bastion_node_public_dns`: Public dns name of the bastion node. Required only if cluster is in private subnet.\n\n `is_ha`: Enabling HA config for cluster\n\n `env_name`: Name of python and R environment. (For Spark clusters)\n\n `python_version`: Version of Python for environment. (For Spark clusters)\n\n `r_version`: Version of R for environment. (For Spark clusters)\n\n `enable_rubix`: Enable rubix on the cluster (For Presto clusters)\n " ]
Please provide a description of the function:def minimal_payload(self): payload_dict = self.__dict__ payload_dict.pop("api_version", None) return util._make_minimal(payload_dict)
[ "\n This method can be used to create the payload which is sent while\n creating or updating a cluster.\n " ]
Please provide a description of the function:def _handle_error(response): code = response.status_code if 200 <= code < 400: return if code == 400: sys.stderr.write(response.text + "\n") raise BadRequest(response) elif code == 401: sys.stderr.write(response.text + "\n") raise UnauthorizedAccess(response) elif code == 403: sys.stderr.write(response.text + "\n") raise ForbiddenAccess(response) elif code == 404: sys.stderr.write(response.text + "\n") raise ResourceNotFound(response) elif code == 405: sys.stderr.write(response.text + "\n") raise MethodNotAllowed(response) elif code == 409: sys.stderr.write(response.text + "\n") raise ResourceConflict(response) elif code == 422: sys.stderr.write(response.text + "\n") raise ResourceInvalid(response) elif code in (449, 502, 503, 504): sys.stderr.write(response.text + "\n") raise RetryWithDelay(response) elif 401 <= code < 500: sys.stderr.write(response.text + "\n") raise ClientError(response) elif 500 <= code < 600: sys.stderr.write(response.text + "\n") raise ServerError(response) else: raise ConnectionError(response)
[ "Raise exceptions in response to any http errors\n\n Args:\n response: A Response object\n\n Raises:\n BadRequest: if HTTP error code 400 returned.\n UnauthorizedAccess: if HTTP error code 401 returned.\n ForbiddenAccess: if HTTP error code 403 returned.\n ResourceNotFound: if HTTP error code 404 is returned.\n MethodNotAllowed: if HTTP error code 405 is returned.\n ResourceConflict: if HTTP error code 409 is returned.\n ResourceInvalid: if HTTP error code 422 is returned.\n ClientError: if HTTP error code falls in 401 - 499.\n ServerError: if HTTP error code falls in 500 - 599.\n ConnectionError: if unknown HTTP error code returned.\n " ]
Please provide a description of the function:def createTemplate(data): conn = Qubole.agent() return conn.post(Template.rest_entity_path, data)
[ "\n Create a new template.\n\n Args:\n `data`: json data required for creating a template\n Returns:\n Dictionary containing the details of the template with its ID.\n " ]
Please provide a description of the function:def editTemplate(id, data): conn = Qubole.agent() return conn.put(Template.element_path(id), data)
[ "\n Edit an existing template.\n\n Args:\n `id`: ID of the template to edit\n `data`: json data to be updated\n Returns:\n Dictionary containing the updated details of the template.\n " ]
Please provide a description of the function:def viewTemplate(id): conn = Qubole.agent() return conn.get(Template.element_path(id))
[ "\n View an existing Template details.\n\n Args:\n `id`: ID of the template to fetch\n \n Returns:\n Dictionary containing the details of the template.\n " ]
Please provide a description of the function:def submitTemplate(id, data={}): conn = Qubole.agent() path = str(id) + "/run" return conn.post(Template.element_path(path), data)
[ "\n Submit an existing Template.\n\n Args:\n `id`: ID of the template to submit\n `data`: json data containing the input_vars \n Returns:\n Dictionary containing Command Object details. \n " ]
Please provide a description of the function:def runTemplate(id, data={}): conn = Qubole.agent() path = str(id) + "/run" res = conn.post(Template.element_path(path), data) cmdType = res['command_type'] cmdId = res['id'] cmdClass = eval(cmdType) cmd = cmdClass.find(cmdId) while not Command.is_done(cmd.status): time.sleep(Qubole.poll_interval) cmd = cmdClass.find(cmd.id) return Template.getResult(cmdClass, cmd)
[ "\n Run an existing Template and waits for the Result.\n Prints result to stdout. \n\n Args:\n `id`: ID of the template to run\n `data`: json data containing the input_vars\n \n Returns: \n An integer as status (0: success, 1: failure)\n " ]
Please provide a description of the function:def listTemplates(data={}): conn = Qubole.agent() url_path = Template.rest_entity_path page_attr = [] if "page" in data and data["page"] is not None: page_attr.append("page=%s" % data["page"]) if "per_page" in data and data["per_page"] is not None: page_attr.append("per_page=%s" % data["per_page"]) if page_attr: url_path = "%s?%s" % (url_path, "&".join(page_attr)) return conn.get(url_path)
[ "\n Fetch existing Templates details.\n\n Args:\n `data`: dictionary containing the value of page number and per-page value\n Returns:\n Dictionary containing paging_info and command_templates details\n " ]
Please provide a description of the function:def edit(args): tap = DbTap.find(args.id) options = {} if not args.name is None: options["db_name"]=args.name if args.host is not None: options["db_host"]=args.host if args.user is not None: options["db_user"]=args.user if args.password is not None: options["db_passwd"] = args.password if args.type is not None: options["db_type"] = args.type if args.location is not None: options["db_location"] = args.location if args.port is not None: options["port"] = args.port tap = tap.edit(**options) return json.dumps(tap.attributes, sort_keys=True, indent=4)
[ " Carefully setup a dict " ]
Please provide a description of the function:def show(cls, app_id): conn = Qubole.agent() return conn.get(cls.element_path(app_id))
[ "\n Shows an app by issuing a GET request to the /apps/ID endpoint.\n " ]
Please provide a description of the function:def create(cls, name, config=None, kind="spark"): conn = Qubole.agent() return conn.post(cls.rest_entity_path, data={'name': name, 'config': config, 'kind': kind})
[ "\n Create a new app.\n\n Args:\n `name`: the name of the app\n\n `config`: a dictionary of key-value pairs\n\n `kind`: kind of the app (default=spark)\n " ]
Please provide a description of the function:def stop(cls, app_id): conn = Qubole.agent() return conn.put(cls.element_path(app_id) + "/stop")
[ "\n Stops an app by issuing a PUT request to the /apps/ID/stop endpoint.\n " ]
Please provide a description of the function:def delete(cls, app_id): conn = Qubole.agent() return conn.delete(cls.element_path(app_id))
[ "\n Delete an app by issuing a DELETE request to the /apps/ID endpoint.\n " ]
Please provide a description of the function:def configure(cls, api_token, api_url="https://api.qubole.com/api/", version="v1.2", poll_interval=5, skip_ssl_cert_check=False, cloud_name="AWS"): cls._auth = QuboleAuth(api_token) cls.api_token = api_token cls.version = version cls.baseurl = api_url if poll_interval < Qubole.MIN_POLL_INTERVAL: log.warn("Poll interval cannot be less than %s seconds. Setting it to %s seconds.\n" % (Qubole.MIN_POLL_INTERVAL, Qubole.MIN_POLL_INTERVAL)) cls.poll_interval = Qubole.MIN_POLL_INTERVAL else: cls.poll_interval = poll_interval cls.skip_ssl_cert_check = skip_ssl_cert_check cls.cloud_name = cloud_name.lower() cls.cached_agent = None
[ "\n Set parameters governing interaction with QDS\n\n Args:\n `api_token`: authorization token for QDS. required\n\n `api_url`: the base URL for QDS API. configurable for testing only\n\n `version`: QDS REST api version. Will be used throughout unless overridden in Qubole.agent(..)\n\n `poll_interval`: interval in secs when polling QDS for events\n " ]
Please provide a description of the function:def agent(cls, version=None): reuse_cached_agent = True if version: log.debug("api version changed to %s" % version) cls.rest_url = '/'.join([cls.baseurl.rstrip('/'), version]) reuse_cached_agent = False else: cls.rest_url = '/'.join([cls.baseurl.rstrip('/'), cls.version]) if cls.api_token is None: raise ConfigError("No API Token specified - please supply one via Qubole.configure()") if not reuse_cached_agent: uncached_agent = Connection(cls._auth, cls.rest_url, cls.skip_ssl_cert_check) return uncached_agent if cls.cached_agent is None: cls.cached_agent = Connection(cls._auth, cls.rest_url, cls.skip_ssl_cert_check) return cls.cached_agent
[ "\n Returns:\n a connection object to make REST calls to QDS\n\n optionally override the `version` of the REST endpoint for advanced\n features available only in the newer version of the API available\n for certain resource end points eg: /v1.3/cluster. When version is\n None we default to v1.2\n " ]
Please provide a description of the function:def show(cls, report_name, data): conn = Qubole.agent() return conn.get(cls.element_path(report_name), data)
[ "\n Shows a report by issuing a GET request to the /reports/report_name\n endpoint.\n\n Args:\n `report_name`: the name of the report to show\n\n `data`: the parameters for the report\n " ]
Please provide a description of the function:def get_cluster_request_parameters(cluster_info, cloud_config, engine_config): ''' Use this to return final minimal request from cluster_info, cloud_config or engine_config objects Alternatively call util._make_minimal if only one object needs to be implemented ''' cluster_request = {} cloud_config = util._make_minimal(cloud_config.__dict__) if bool(cloud_config): cluster_request['cloud_config'] = cloud_config engine_config = util._make_minimal(engine_config.__dict__) if bool(engine_config): cluster_request['engine_config'] = engine_config cluster_request.update(util._make_minimal(cluster_info.__dict__)) return cluster_request
[]
Please provide a description of the function:def set_cluster_info(self, disallow_cluster_termination=None, enable_ganglia_monitoring=None, datadog_api_token=None, datadog_app_token=None, node_bootstrap=None, master_instance_type=None, slave_instance_type=None, min_nodes=None, max_nodes=None, slave_request_type=None, fallback_to_ondemand=None, node_base_cooldown_period=None, node_spot_cooldown_period=None, custom_tags=None, heterogeneous_config=None, maximum_bid_price_percentage=None, timeout_for_request=None, maximum_spot_instance_percentage=None, stable_maximum_bid_price_percentage=None, stable_timeout_for_request=None, stable_spot_fallback=None, spot_block_duration=None, idle_cluster_timeout=None, disk_count=None, disk_type=None, disk_size=None, root_disk_size=None, upscaling_config=None, enable_encryption=None, customer_ssh_key=None, cluster_name=None, force_tunnel=None, image_uri_overrides=None, env_name=None, python_version=None, r_version=None, disable_cluster_pause=None, paused_cluster_timeout_mins=None, disable_autoscale_node_pause=None, paused_autoscale_node_timeout_mins=None): self.cluster_info['master_instance_type'] = master_instance_type self.cluster_info['slave_instance_type'] = slave_instance_type self.cluster_info['min_nodes'] = min_nodes self.cluster_info['max_nodes'] = max_nodes self.cluster_info['cluster_name'] = cluster_name self.cluster_info['node_bootstrap'] = node_bootstrap self.cluster_info['disallow_cluster_termination'] = disallow_cluster_termination self.cluster_info['force_tunnel'] = force_tunnel self.cluster_info['fallback_to_ondemand'] = fallback_to_ondemand self.cluster_info['node_base_cooldown_period'] = node_base_cooldown_period self.cluster_info['node_spot_cooldown_period'] = node_spot_cooldown_period self.cluster_info['customer_ssh_key'] = customer_ssh_key if custom_tags and custom_tags.strip(): try: self.cluster_info['custom_tags'] = json.loads(custom_tags.strip()) except Exception as e: raise Exception("Invalid JSON string for custom ec2 tags: %s" % e.message) self.cluster_info['heterogeneous_config'] = heterogeneous_config self.cluster_info['slave_request_type'] = slave_request_type self.cluster_info['idle_cluster_timeout'] = idle_cluster_timeout self.cluster_info['spot_settings'] = {} self.cluster_info['rootdisk'] = {} self.cluster_info['rootdisk']['size'] = root_disk_size self.set_spot_instance_settings(maximum_bid_price_percentage, timeout_for_request, maximum_spot_instance_percentage) self.set_stable_spot_bid_settings(stable_maximum_bid_price_percentage, stable_timeout_for_request, stable_spot_fallback) self.set_spot_block_settings(spot_block_duration) self.set_data_disk(disk_size, disk_count, disk_type, upscaling_config, enable_encryption) self.set_monitoring(enable_ganglia_monitoring, datadog_api_token, datadog_app_token) self.set_internal(image_uri_overrides) self.set_env_settings(env_name, python_version, r_version) self.set_start_stop_settings(disable_cluster_pause, paused_cluster_timeout_mins, disable_autoscale_node_pause, paused_autoscale_node_timeout_mins)
[ "\n Args:\n\n `disallow_cluster_termination`: Set this to True if you don't want\n qubole to auto-terminate idle clusters. Use this option with\n extreme caution.\n\n `enable_ganglia_monitoring`: Set this to True if you want to enable\n ganglia monitoring for the cluster.\n\n `node_bootstrap`: name of the node bootstrap file for this\n cluster. It should be in stored in S3 at\n <your-default-location>/scripts/hadoop/\n\n `master_instance_type`: The instance type to use for the Hadoop master\n node.\n\n `slave_instance_type`: The instance type to use for the Hadoop slave\n nodes.\n\n `min_nodes`: Number of nodes to start the cluster with.\n\n `max_nodes`: Maximum number of nodes the cluster may be auto-scaled up\n to.\n\n `slave_request_type`: Purchasing option for slave instances.\n Valid values: \"ondemand\", \"hybrid\", \"spot\".\n\n `fallback_to_ondemand`: Fallback to on-demand nodes if spot nodes could not be\n obtained. Valid only if slave_request_type is 'spot'.\n\n `node_base_cooldown_period`: Time for which an on-demand node waits before termination (Unit: minutes)\n\n `node_spot_cooldown_period`: Time for which a spot node waits before termination (Unit: minutes)\n\n `maximum_bid_price_percentage`: ( Valid only when `slave_request_type`\n is hybrid or spot.) Maximum value to bid for spot\n instances, expressed as a percentage of the base price\n for the slave node instance type.\n\n `timeout_for_request`: Timeout for a spot instance request (Unit:\n minutes)\n\n `maximum_spot_instance_percentage`: Maximum percentage of instances\n that may be purchased from the AWS Spot market. Valid only when\n slave_request_type is \"hybrid\".\n\n `stable_maximum_bid_price_percentage`: Maximum value to bid for stable node spot\n instances, expressed as a percentage of the base price\n (applies to both master and slave nodes).\n\n `stable_timeout_for_request`: Timeout for a stable node spot instance request (Unit:\n minutes)\n\n `stable_spot_fallback`: Whether to fallback to on-demand instances for\n stable nodes if spot instances are not available\n\n `spot_block_duration`: Time for which the spot block instance is provisioned (Unit:\n minutes)\n\n `disk_count`: Number of EBS volumes to attach\n to each instance of the cluster.\n\n `disk_type`: Type of the EBS volume. Valid\n values are 'standard' (magnetic) and 'ssd'.\n\n `disk_size`: Size of each EBS volume, in GB.\n\n `root_disk_size`: Size of root volume, in GB.\n\n `enable_encryption`: Encrypt the ephemeral drives on the instance.\n\n `customer_ssh_key`: SSH key to use to login to the instances.\n\n `idle_cluster_timeout`: The buffer time (range in 0-6 hrs) after a cluster goes idle\n and gets terminated, given cluster auto termination is on and no cluster specific\n timeout has been set (default is 2 hrs)\n\n `heterogeneous_config` : Configuring heterogeneous nodes in Hadoop 2 and Spark clusters.\n It implies that slave nodes can be of different instance types\n\n `custom_tags` : Custom tags to be set on all instances\n of the cluster. Specified as JSON object (key-value pairs)\n\n `datadog_api_token` : Specify the Datadog API token to use the Datadog monitoring service\n\n `datadog_app_token` : Specify the Datadog APP token to use the Datadog monitoring service\n\n `image_uri_overrides` : Override the image name provided\n\n `env_name`: Name of python and R environment. (For Spark clusters)\n\n `python_version`: Version of Python for environment. (For Spark clusters)\n\n `r_version`: Version of R for environment. 
(For Spark clusters)\n\n `disable_cluster_pause`: Disable cluster pause\n\n `paused_cluster_timeout_mins`: Paused cluster timeout in mins\n\n `disable_autoscale_node_pause`: Disable autoscale node pause\n\n `paused_autoscale_node_timeout_mins`: Paused autoscale node timeout in mins\n\n Doc: For getting details about arguments\n http://docs.qubole.com/en/latest/rest-api/cluster_api/create-new-cluster.html#parameters\n\n " ]
Please provide a description of the function:def create(cls, cluster_info): conn = Qubole.agent(version="v2") return conn.post(cls.rest_entity_path, data=cluster_info)
[ "\n Create a new cluster using information provided in `cluster_info`.\n " ]
Please provide a description of the function:def update(cls, cluster_id_label, cluster_info): conn = Qubole.agent(version="v2") return conn.put(cls.element_path(cluster_id_label), data=cluster_info)
[ "\n Update the cluster with id/label `cluster_id_label` using information provided in\n `cluster_info`.\n " ]
Please provide a description of the function:def clone(cls, cluster_id_label, cluster_info): conn = Qubole.agent(version="v2") return conn.post(cls.element_path(cluster_id_label) + '/clone', data=cluster_info)
[ "\n Update the cluster with id/label `cluster_id_label` using information provided in\n `cluster_info`.\n " ]
Please provide a description of the function:def list(cls, label=None, cluster_id=None, state=None, page=None, per_page=None): if cluster_id is not None: return cls.show(cluster_id) if label is not None: return cls.show(label) params = {} if page: params['page'] = page if per_page: params['per_page'] = per_page params = None if not params else params conn = Qubole.agent(version="v2") cluster_list = conn.get(cls.rest_entity_path) if state is None: # return the complete list since state is None return conn.get(cls.rest_entity_path, params=params) # filter clusters based on state result = [] if 'clusters' in cluster_list: for cluster in cluster_list['clusters']: if state.lower() == cluster['state'].lower(): result.append(cluster) return result
[ "\n List existing clusters present in your account.\n\n Kwargs:\n `state`: list only those clusters which are in this state\n `page`: page number\n `per_page`: number of clusters to be retrieved per page\n\n Returns:\n List of clusters satisfying the given criteria\n " ]
Please provide a description of the function:def _download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=None): ''' Downloads the contents of all objects in s3_path into fp Args: `boto_conn`: S3 connection object `s3_path`: S3 path to be downloaded `fp`: The file object where data is to be downloaded ''' #Progress bar to display download progress def _callback(downloaded, total): ''' Call function for upload. `downloaded`: File size already downloaded (int) `total`: Total file size to be downloaded (int) ''' if (total is 0) or (downloaded == total): return progress = downloaded*100/total sys.stderr.write('\r[{0}] {1}%'.format('#'*progress, progress)) sys.stderr.flush() m = _URI_RE.match(s3_path) bucket_name = m.group(1) bucket = boto_conn.get_bucket(bucket_name) retries = 6 if s3_path.endswith('/') is False: #It is a file key_name = m.group(2) key_instance = bucket.get_key(key_name) while key_instance is None and retries > 0: retries = retries - 1 log.info("Results file is not available on s3. Retry: " + str(6-retries)) time.sleep(10) key_instance = bucket.get_key(key_name) if key_instance is None: raise Exception("Results file not available on s3 yet. This can be because of s3 eventual consistency issues.") log.info("Downloading file from %s" % s3_path) if delim is None: try: key_instance.get_contents_to_file(fp) # cb=_callback except boto.exception.S3ResponseError as e: if (e.status == 403): # SDK-191, boto gives an error while fetching the objects using versions which happens by default # in the get_contents_to_file() api. So attempt one without specifying version. log.warn("Access denied while fetching the s3 object. Retrying without specifying the version....") key_instance.open() fp.write(key_instance.read()) key_instance.close() else: raise else: # Get contents as string. Replace parameters and write to file. _read_iteratively(key_instance, fp, delim=delim) else: #It is a folder key_prefix = m.group(2) bucket_paths = bucket.list(key_prefix) for one_path in bucket_paths: name = one_path.name # Eliminate _tmp_ files which ends with $folder$ if name.endswith('$folder$'): continue log.info("Downloading file from %s" % name) if delim is None: one_path.get_contents_to_file(fp) # cb=_callback else: _read_iteratively(one_path, fp, delim=delim)
[]
Please provide a description of the function:def list(cls, **kwargs): conn = Qubole.agent() params = {} for k in kwargs: if kwargs[k]: params[k] = kwargs[k] params = None if not params else params return conn.get(cls.rest_entity_path, params=params)
[ "\n List a command by issuing a GET request to the /command endpoint\n\n Args:\n `**kwargs`: Various parameters can be used to filter the commands such as:\n * command_type - HiveQuery, PrestoQuery, etc. The types should be in title case.\n * status - failed, success, etc\n * name\n * command_id\n * qbol_user_id\n * command_source\n * page\n * cluster_label\n * session_id, etc\n\n For example - Command.list(command_type = \"HiveQuery\", status = \"success\")\n " ]
Please provide a description of the function:def create(cls, **kwargs): conn = Qubole.agent() if kwargs.get('command_type') is None: kwargs['command_type'] = cls.__name__ if kwargs.get('tags') is not None: kwargs['tags'] = kwargs['tags'].split(',') return cls(conn.post(cls.rest_entity_path, data=kwargs))
[ "\n Create a command object by issuing a POST request to the /command endpoint\n Note - this does not wait for the command to complete\n\n Args:\n `**kwargs`: keyword arguments specific to command type\n\n Returns:\n Command object\n " ]
Please provide a description of the function:def run(cls, **kwargs): # vars to keep track of actual logs bytes (err, tmp) and new bytes seen in each iteration err_pointer, tmp_pointer, new_bytes = 0, 0, 0 print_logs_live = kwargs.pop("print_logs_live", None) # We don't want to send this to the API. cmd = cls.create(**kwargs) sighandler = SignalHandler() while not Command.is_done(cmd.status): if sighandler.received_term_signal: logging.warning("Received signal {}. Canceling Qubole Command ID: {}".format(sighandler.last_signal, cmd.id)) cls.cancel(cmd) exit() time.sleep(Qubole.poll_interval) cmd = cls.find(cmd.id) if print_logs_live is True: log, err_length, tmp_length = cmd.get_log_partial(err_pointer, tmp_pointer) # if err length is non zero, then tmp_pointer needs to be reset to the current tmp_length as the # err_length will contain the full set of logs from last seen non-zero err_length. if err_length != "0": err_pointer += int(err_length) new_bytes = int(err_length) + int(tmp_length) - tmp_pointer tmp_pointer = int(tmp_length) else: tmp_pointer += int(tmp_length) new_bytes = int(tmp_length) if len(log) > 0 and new_bytes > 0: print(log[-new_bytes:], file=sys.stderr) return cmd
[ "\n Create a command object by issuing a POST request to the /command endpoint\n Waits until the command is complete. Repeatedly polls to check status\n\n Args:\n `**kwargs`: keyword arguments specific to command type\n\n Returns:\n Command object\n " ]
Please provide a description of the function:def cancel_id(cls, id): conn = Qubole.agent() data = {"status": "kill"} return conn.put(cls.element_path(id), data)
[ "\n Cancels command denoted by this id\n\n Args:\n `id`: command id\n " ]
Please provide a description of the function:def get_log_id(cls, id): conn = Qubole.agent() r = conn.get_raw(cls.element_path(id) + "/logs") return r.text
[ "\n Fetches log for the command represented by this id\n\n Args:\n `id`: command id\n " ]
Please provide a description of the function:def get_log(self): log_path = self.meta_data['logs_resource'] conn = Qubole.agent() r = conn.get_raw(log_path) return r.text
[ "\n Fetches log for the command represented by this object\n\n Returns:\n The log as a string\n " ]
Please provide a description of the function:def get_log_partial(self, err_pointer=0, tmp_pointer=0): log_path = self.meta_data['logs_resource'] conn = Qubole.agent() r = conn.get_raw(log_path, params={'err_file_processed':err_pointer, 'tmp_file_processed':tmp_pointer}) if 'err_length' in r.headers.keys() and 'tmp_length' in r.headers.keys(): return [r.text, r.headers['err_length'], r.headers['tmp_length']] return [r.text, 0, 0]
[ "\n Fetches log (full or partial) for the command represented by this object\n Accepts:\n err_pointer(int): Pointer to err text bytes we've received so far, which will be passed to next api call\n to indicate pointer to fetch logs.\n tmp_pointer(int): Same as err_pointer except it indicates the bytes of tmp file processed.\n Returns:\n An array where the first field is actual log (string), while 2nd & 3rd are counts of err and tmp bytes\n which have been returned by api in addition to the given pointers.\n " ]
Please provide a description of the function:def get_results(self, fp=sys.stdout, inline=True, delim=None, fetch=True, qlog=None, arguments=[]): result_path = self.meta_data['results_resource'] conn = Qubole.agent() include_header = "false" if len(arguments) == 1: include_header = arguments.pop(0) if include_header not in ('true', 'false'): raise ParseError("incude_header can be either true or false") r = conn.get(result_path, {'inline': inline, 'include_headers': include_header}) if r.get('inline'): raw_results = r['results'] encoded_results = raw_results.encode('utf8') if sys.version_info < (3, 0, 0): fp.write(encoded_results) else: import io if isinstance(fp, io.TextIOBase): if hasattr(fp, 'buffer'): fp.buffer.write(encoded_results) else: fp.write(raw_results) elif isinstance(fp, io.BufferedIOBase) or isinstance(fp, io.RawIOBase): fp.write(encoded_results) else: # Can this happen? Don't know what's the right thing to do in this case. pass else: if fetch: storage_credentials = conn.get(Account.credentials_rest_entity_path) if storage_credentials['region_endpoint'] is not None: boto_conn = boto.connect_s3(aws_access_key_id=storage_credentials['storage_access_key'], aws_secret_access_key=storage_credentials['storage_secret_key'], security_token = storage_credentials['session_token'], host = storage_credentials['region_endpoint']) else: boto_conn = boto.connect_s3(aws_access_key_id=storage_credentials['storage_access_key'], aws_secret_access_key=storage_credentials['storage_secret_key'], security_token=storage_credentials['session_token']) log.info("Starting download from result locations: [%s]" % ",".join(r['result_location'])) #fetch latest value of num_result_dir num_result_dir = Command.find(self.id).num_result_dir # If column/header names are not able to fetch then use include header as true if include_header.lower() == "true" and qlog is not None: write_headers(qlog, fp) for s3_path in r['result_location']: # In Python 3, # If the delim is None, fp should be in binary mode because # boto expects it to be. # If the delim is not None, then both text and binary modes # work. _download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=delim) else: fp.write(",".join(r['result_location']))
[ "\n Fetches the result for the command represented by this object\n\n get_results will retrieve results of the command and write to stdout by default.\n Optionally one can write to a filestream specified in `fp`. The `inline` argument\n decides whether the result can be returned as a CRLF separated string. In cases where\n the results are greater than 20MB, get_results will attempt to read from s3 and write\n to fp. The retrieval of results from s3 can be turned off by the `fetch` argument\n\n Args:\n `fp`: a file object to write the results to directly\n `inline`: whether or not results are returned inline as CRLF separated string\n `fetch`: True to fetch the result even if it is greater than 20MB, False to\n only get the result location on s3\n " ]
Please provide a description of the function:def parse(cls, args): try: (options, args) = cls.optparser.parse_args(args) except OptionParsingError as e: raise ParseError(e.msg, cls.optparser.format_help()) except OptionParsingExit as e: return None SparkCommand.validate_program(options) SparkCommand.validate_script_location(options) SparkCommand.validate_cmdline(options) SparkCommand.validate_sql(options) if options.macros is not None: options.macros = json.loads(options.macros) v = vars(options) v["command_type"] = "SparkCommand" return v
[ "\n Parse command line arguments to construct a dictionary of command\n parameters that can be used to create a command\n\n Args:\n `args`: sequence of arguments\n\n Returns:\n Dictionary that can be used in create method\n\n Raises:\n ParseError: when the arguments are not correct\n " ]