def upload(self, payload=None, content_type=None):
"""
    Upload the archive at `payload` with content type `content_type`
returns (int): upload status code
"""
# platform - prefer the value passed in to func over config
payload = payload or self.config.payload
content_type = content_type or self.config.content_type
if payload is None:
raise ValueError('Specify a file to upload.')
if not os.path.exists(payload):
raise IOError('Cannot upload %s: File does not exist.' % payload)
upload_results = client.upload(
self.config, self.connection, payload, content_type)
# return api response
    return upload_results

def normalizeInternalObjectType(value, cls, name):
"""
Normalizes an internal object type.
    * **value** must be an instance of **cls**.
* Returned value is the same type as the input value.
"""
if not isinstance(value, cls):
raise TypeError("%s must be a %s instance, not %s."
% (name, name, type(value).__name__))
    return value

def mergeNewMSBWT(mergedDir, inputBwtDirs, numProcs, logger):
'''
This function will take a list of input BWTs (compressed or not) and merge them into a single BWT
    @param mergedDir - the destination directory for the final merged MSBWT
    @param inputBwtDirs - the list of directories containing the BWTs to merge
@param numProcs - number of processes we're allowed to use
@param logger - output goes here
'''
st = time.time()
iterst = time.time()
vcLen = 6
#TODO: take advantage of these to skip an iteration or two perhaps
numInputs = len(inputBwtDirs)
msbwts = [None]*numInputs
mergedLength = 0
for i, dirName in enumerate(inputBwtDirs):
'''
NOTE: in practice, since we're allowing for multiprocessing, we construct the FM-index for each input BWT
simply because in the long run, this allows us to figure out how to start processing chunks separately.
        Without this, we would need to track extra information that really just represents the FM-index.
'''
msbwts[i] = MultiStringBWT.loadBWT(dirName, logger)
mergedLength += msbwts[i].totalSize
#binSize = 2**1#small bin debugging
#binSize = 2**15#this one is just for test purposes, makes easy to debug things
#binSize = 2**25#diff in 22-23 is not that much, 23-24 was 8 seconds of difference, so REALLY no diff
binSize = 2**28
#allocate the mergedBWT space
logger.info('Allocating space on disk...')
mergedBWT = np.lib.format.open_memmap(mergedDir+'/msbwt.npy', 'w+', '<u1', (mergedLength,))
#this one will create the array using bits
logger.info('Initializing iterations...')
placeArray = np.lib.format.open_memmap(mergedDir+'/temp.0.npy', 'w+', '<u1', (mergedBWT.shape[0],))
copiedPlaceArray = np.lib.format.open_memmap(mergedDir+'/temp.1.npy', 'w+', '<u1', (mergedBWT.shape[0],))
start = msbwts[0].totalSize
end = 0
#fill out the initial array with 0s, 1s, 2s, etc. as our initial condition
for i, msbwt in enumerate(msbwts):
end += msbwt.getTotalSize()
placeArray[start:end].fill(i)
copiedPlaceArray[start:end].fill(i)
start = end
#create something to track the offsets
#TODO: x/binSize + 1 makes one too many bins if it's exactly divisible by binSize, ex: 4 length BWT with binSize 2
nextBinHasChanged = np.ones(dtype='b', shape=(mergedBWT.shape[0]/binSize+1,))
prevOffsetCounts = np.zeros(dtype='<u8', shape=(mergedBWT.shape[0]/binSize+1, numInputs))
currOffsetCounts = np.zeros(dtype='<u8', shape=(mergedBWT.shape[0]/binSize+1, numInputs))
nextOffsetCounts = np.zeros(dtype='<u8', shape=(mergedBWT.shape[0]/binSize+1, numInputs))
binUpdates = [{}]*(mergedBWT.shape[0]/binSize+1)
bwtInd = 0
offsets = [0]*numInputs
for x in xrange(0, currOffsetCounts.shape[0]):
#set, then change for next iter
nextOffsetCounts[x] = offsets
remaining = binSize
while remaining > 0 and bwtInd < numInputs:
if remaining > msbwts[bwtInd].totalSize-offsets[bwtInd]:
remaining -= msbwts[bwtInd].totalSize-offsets[bwtInd]
offsets[bwtInd] = msbwts[bwtInd].totalSize
bwtInd += 1
else:
offsets[bwtInd] += remaining
remaining = 0
ignored = 0
#original
sys.stdout.write('\rcp ')
sys.stdout.flush()
del copiedPlaceArray
needsMoreIterations = True
i = 0
sameOffsetCount = 0
while needsMoreIterations:
prevOffsetCounts = currOffsetCounts
currOffsetCounts = nextOffsetCounts
nextOffsetCounts = np.zeros(dtype='<u8', shape=(mergedBWT.shape[0]/binSize+1, numInputs))
needsMoreIterations = False
sameOffsetCount = 0
#this method uses a condensed byte and will ignore regions that are already finished
sys.stdout.write('\rld ')
sys.stdout.flush()
ignored = 0
iteret = time.time()
sys.stdout.write('\r')
        logger.info('Finished iter '+str(i)+' in '+str(iteret-iterst)+' seconds')
iterst = time.time()
i += 1
sys.stdout.write('\rld')
sys.stdout.flush()
#track which bins are actually different
binHasChanged = nextBinHasChanged
nextBinHasChanged = np.zeros(dtype='b', shape=(mergedBWT.shape[0]/binSize+1))
tups = []
for x in xrange(0, mergedBWT.shape[0]/binSize + 1):
#check if the current offset matches the previous iteration offset
sameOffset = np.array_equal(currOffsetCounts[x], prevOffsetCounts[x])
if sameOffset:
sameOffsetCount += 1
'''
TODO: the below False is there because this only works if you do a full file copy right now. It's
            because unless we copy, the appropriate parts of the nextPlaceArray aren't properly updated. It's
            unclear whether one of these is better than the other in terms of performance. File copying is slow, but
            if only a couple sequences are similar then skipping is good. I think in general, we only skip at the
beginning for real data though, so I'm going with the no-skip, no-copy form until I can resolve the
problem (if there's a resolution).
'''
if False and not binHasChanged[x] and sameOffset:
for key in binUpdates[x]:
nextOffsetCounts[key] += binUpdates[x][key]
ignored += 1
else:
#note these are swapped depending on the iteration, saves time since there is no file copying
if i % 2 == 0:
tup = (x, binSize, vcLen, currOffsetCounts[x], mergedDir+'/temp.0.npy', mergedDir+'/temp.1.npy', inputBwtDirs)
else:
tup = (x, binSize, vcLen, currOffsetCounts[x], mergedDir+'/temp.1.npy', mergedDir+'/temp.0.npy', inputBwtDirs)
tups.append(tup)
if numProcs > 1:
#TODO: tinker with chunksize, it might matter
myPool = multiprocessing.Pool(numProcs)
#myPool = multiprocessing.pool.ThreadPool(numProcs)
rets = myPool.imap(mergeNewMSBWTPoolCall, tups, chunksize=10)
else:
rets = []
for tup in tups:
rets.append(mergeNewMSBWTPoolCall(tup))
progressCounter = ignored
sys.stdout.write('\r'+str(100*progressCounter*binSize/mergedBWT.shape[0])+'%')
sys.stdout.flush()
for ret in rets:
#iterate through the returns so we can figure out information necessary for continuation
(x, nBHC, nOC, nMI) = ret
binUpdates[x] = nOC
for k in nBHC:
nextBinHasChanged[k] |= nBHC[k]
for b in nOC:
nextOffsetCounts[b] += nOC[b]
needsMoreIterations |= nMI
progressCounter += 1
sys.stdout.write('\r'+str(min(100*progressCounter*binSize/mergedBWT.shape[0], 100))+'%')
sys.stdout.flush()
nextOffsetCounts = np.cumsum(nextOffsetCounts, axis=0)-nextOffsetCounts
if numProcs > 1:
myPool.terminate()
myPool.join()
myPool = None
sys.stdout.write('\r')
sys.stdout.flush()
logger.info('Order solved, saving final array...')
#TODO: make this better
offsets = np.zeros(dtype='<u8', shape=(numInputs,))
for i in xrange(0, mergedBWT.shape[0]/binSize+1):
ind = placeArray[i*binSize:(i+1)*binSize]
if i == mergedBWT.shape[0]/binSize:
ind = ind[0:mergedBWT.shape[0]-i*binSize]
bc = np.bincount(ind, minlength=numInputs)
for x in xrange(0, numInputs):
mergedBWT[np.add(i*binSize, np.where(ind == x))] = msbwts[x].getBWTRange(int(offsets[x]), int(offsets[x]+bc[x]))
offsets += bc
et = time.time()
    logger.info('Finished all merge iterations in '+str(et-st)+' seconds.')

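A hypothetical invocation sketch; the directory names are placeholders, and the function is assumed to be importable from its msbwt module alongside MultiStringBWT:

import logging
logging.basicConfig(level=logging.INFO)
merge_logger = logging.getLogger('msbwt-merge')
# merge two previously constructed BWT directories into './merged_bwt' using 4 processes
mergeNewMSBWT('./merged_bwt', ['./bwt_sample1', './bwt_sample2'], 4, merge_logger)
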
def long_description(*filenames):
"""Provide a long description."""
res = ['']
for filename in filenames:
with open(filename) as fp:
for line in fp:
res.append(' ' + line)
res.append('')
res.append('\n')
    return EMPTYSTRING.join(res)

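An illustrative call; EMPTYSTRING is assumed to be the module-level constant '' that the function joins with, and the filenames are placeholders:

EMPTYSTRING = ''
description = long_description('README.rst', 'NEWS.rst')
# each file's lines come back indented by one space, with files separated by blank lines
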
def _StatusUpdateThreadMain(self):
"""Main function of the status update thread."""
while self._status_update_active:
# Make a local copy of the PIDs in case the dict is changed by
# the main thread.
for pid in list(self._process_information_per_pid.keys()):
self._CheckStatusAnalysisProcess(pid)
self._UpdateForemanProcessStatus()
if self._status_update_callback:
self._status_update_callback(self._processing_status)
    time.sleep(self._STATUS_UPDATE_INTERVAL)

def transpose(self, name=None, activate_final=None):
"""Returns transposed `MLP`.
Args:
name: Optional string specifying the name of the transposed module. The
default name is constructed by appending "_transpose"
to `self.module_name`.
activate_final: Optional boolean determining if the activation and batch
normalization, if turned on, are applied to the final layer.
Returns:
Matching transposed `MLP` module.
"""
if name is None:
name = self.module_name + "_transpose"
if activate_final is None:
activate_final = self.activate_final
output_sizes = [lambda l=layer: l.input_shape[1] for layer in self._layers]
output_sizes.reverse()
return MLP(
name=name,
output_sizes=output_sizes,
activation=self.activation,
activate_final=activate_final,
initializers=self.initializers,
partitioners=self.partitioners,
regularizers=self.regularizers,
use_bias=self.use_bias,
        use_dropout=self.use_dropout)

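A hedged sketch in the Sonnet v1 / TF1 style this class appears to come from; the input width, layer sizes, and placeholder usage are illustrative, and the module must be connected once so each layer's input_shape is defined before transpose() is called:

import tensorflow as tf                    # TF1-style graph mode assumed
mlp = MLP(output_sizes=[128, 64], name="encoder")
codes = mlp(tf.placeholder(tf.float32, shape=[None, 256]))   # connect: defines layer input shapes
decoder = mlp.transpose()                  # layer widths reversed from the inputs: 64 -> 128 -> 256
reconstruction = decoder(codes)
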
def oplog_thread_join(self):
"""Stops all the OplogThreads
"""
LOG.info("MongoConnector: Stopping all OplogThreads")
for thread in self.shard_set.values():
        thread.join()

def query(self,
watch_key,
time_indices=None,
slicing=None,
mapping=None):
"""Query tensor store for a given watch_key.
Args:
watch_key: The watch key to query.
time_indices: A numpy-style slicing string for time indices. E.g.,
`-1`, `:-2`, `[::2]`. If not provided (`None`), will use -1.
slicing: A numpy-style slicing string for individual time steps.
      mapping: A mapping string or a list of them. Supported mappings:
`{None, 'image/png', 'health-pill'}`.
Returns:
The potentially sliced values as a nested list of values or its mapped
format. A `list` of nested `list` of values.
Raises:
ValueError: If the shape of the sliced array is incompatible with mapping
mode. Or if the mapping type is invalid.
"""
if watch_key not in self._tensor_data:
raise KeyError("watch_key not found: %s" % watch_key)
if time_indices is None:
time_indices = '-1'
time_slicing = tensor_helper.parse_time_indices(time_indices)
all_time_indices = list(range(self._tensor_data[watch_key].num_total()))
sliced_time_indices = all_time_indices[time_slicing]
if not isinstance(sliced_time_indices, list):
sliced_time_indices = [sliced_time_indices]
recombine_and_map = False
step_mapping = mapping
if len(sliced_time_indices) > 1 and mapping not in (None, ):
recombine_and_map = True
step_mapping = None
output = []
for index in sliced_time_indices:
value = self._tensor_data[watch_key].query(index)[0]
if (value is not None and
not isinstance(value, debug_data.InconvertibleTensorProto)):
output.append(tensor_helper.array_view(
value, slicing=slicing, mapping=step_mapping)[2])
else:
output.append(None)
if recombine_and_map:
if mapping == 'image/png':
output = tensor_helper.array_to_base64_png(output)
elif mapping and mapping != 'none':
logger.warn(
          'Unsupported mapping mode after recombining time steps: %s',
mapping)
    return output

def stream_mapred(self, inputs, query, timeout):
"""
Streams a MapReduce query as (phase, data) pairs. This is a
generator method which should be iterated over.
The caller should explicitly close the returned iterator,
either using :func:`contextlib.closing` or calling ``close()``
explicitly. Consuming the entire iterator will also close the
stream. If it does not, the associated connection might
not be returned to the pool. Example::
from contextlib import closing
# Using contextlib.closing
with closing(mymapred.stream()) as results:
for phase, result in results:
do_something(phase, result)
# Explicit close()
stream = mymapred.stream()
for phase, result in stream:
do_something(phase, result)
stream.close()
:param inputs: the input list/structure
:type inputs: list, dict
:param query: the list of query phases
:type query: list
:param timeout: the query timeout
:type timeout: integer, None
:rtype: iterator
"""
_validate_timeout(timeout)
def make_op(transport):
return transport.stream_mapred(inputs, query, timeout)
for phase, data in self._stream_with_retry(make_op):
        yield phase, data

def get_vartype(data):
"""Infer the type of a variable (technically a Series).
The types supported are split in standard types and special types.
Standard types:
* Categorical (`TYPE_CAT`): the default type if no other one can be determined
* Numerical (`TYPE_NUM`): if it contains numbers
* Boolean (`TYPE_BOOL`): at this time only detected if it contains boolean values, see todo
* Date (`TYPE_DATE`): if it contains datetime
Special types:
* Constant (`S_TYPE_CONST`): if all values in the variable are equal
* Unique (`S_TYPE_UNIQUE`): if all values in the variable are different
* Unsupported (`S_TYPE_UNSUPPORTED`): if the variable is unsupported
The result is cached by column name in a global variable to avoid recomputing.
Parameters
----------
data : Series
The data type of the Series.
Returns
-------
str
The data type of the Series.
Notes
----
* Should improve verification when a categorical or numeric field has 3 values, it could be a categorical field
or just a boolean with NaN values
* #72: Numeric with low Distinct count should be treated as "Categorical"
"""
if data.name is not None and data.name in _MEMO:
return _MEMO[data.name]
vartype = None
try:
distinct_count = get_groupby_statistic(data)[1]
leng = len(data)
if distinct_count <= 1:
vartype = S_TYPE_CONST
elif pd.api.types.is_bool_dtype(data) or (distinct_count == 2 and pd.api.types.is_numeric_dtype(data)):
vartype = TYPE_BOOL
elif pd.api.types.is_numeric_dtype(data):
vartype = TYPE_NUM
elif pd.api.types.is_datetime64_dtype(data):
vartype = TYPE_DATE
elif distinct_count == leng:
vartype = S_TYPE_UNIQUE
else:
vartype = TYPE_CAT
except:
vartype = S_TYPE_UNSUPPORTED
if data.name is not None:
_MEMO[data.name] = vartype
    return vartype

def disconnect(self, device):
"""Disconnect using protocol specific method."""
# self.device.ctrl.sendcontrol(']')
# self.device.ctrl.sendline('quit')
self.log("TELNET disconnect")
try:
self.device.ctrl.send(chr(4))
except OSError:
self.log("Protocol already disconnected") | Disconnect using protocol specific method. | Below is the the instruction that describes the task:
### Input:
Disconnect using protocol specific method.
### Response:
def disconnect(self, device):
"""Disconnect using protocol specific method."""
# self.device.ctrl.sendcontrol(']')
# self.device.ctrl.sendline('quit')
self.log("TELNET disconnect")
try:
self.device.ctrl.send(chr(4))
except OSError:
self.log("Protocol already disconnected") |
def kill(self):
""" Send SIGKILL to the task's process. """
logger.info('Sending SIGKILL to task {0}'.format(self.name))
if hasattr(self, 'remote_client') and self.remote_client is not None:
self.kill_sent = True
self.remote_client.close()
return
if not self.process:
raise DagobahError('task does not have a running process')
self.kill_sent = True
    self.process.kill()

def _wait_output(popen, is_slow):
"""Returns `True` if we can get output of the command in the
`settings.wait_command` time.
    The command will be killed if it does not finish within that time.
:type popen: Popen
:rtype: bool
"""
proc = Process(popen.pid)
try:
proc.wait(settings.wait_slow_command if is_slow
else settings.wait_command)
return True
except TimeoutExpired:
for child in proc.children(recursive=True):
_kill_process(child)
_kill_process(proc)
        return False

def create_switch(type, settings, pin):
"""Create a switch.
Args:
        type (str): type of the switch [A,B,C,D]
        settings (str): a comma separated list
pin (int): wiringPi pin
Returns:
switch
"""
switch = None
if type == "A":
group, device = settings.split(",")
switch = pi_switch.RCSwitchA(group, device)
elif type == "B":
addr, channel = settings.split(",")
addr = int(addr)
channel = int(channel)
switch = pi_switch.RCSwitchB(addr, channel)
elif type == "C":
family, group, device = settings.split(",")
group = int(group)
device = int(device)
switch = pi_switch.RCSwitchC(family, group, device)
elif type == "D":
group, device = settings.split(",")
device = int(device)
switch = pi_switch.RCSwitchD(group, device)
else:
print "Type %s is not supported!" % type
sys.exit()
switch.enableTransmit(pin)
    return switch

def delete_collection_namespaced_replica_set(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_replica_set # noqa: E501
delete collection of ReplicaSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_replica_set(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_replica_set_with_http_info(namespace, **kwargs) # noqa: E501
else:
(data) = self.delete_collection_namespaced_replica_set_with_http_info(namespace, **kwargs) # noqa: E501
        return data

def parse_or_die(self, args=None):
"""Like :meth:`ParseKeywords.parse`, but calls :func:`pkwit.cli.die` if a
:exc:`KwargvError` is raised, printing the exception text. Returns
*self* for convenience.
"""
from .cli import die
try:
return self.parse(args)
except KwargvError as e:
die(e) | Like :meth:`ParseKeywords.parse`, but calls :func:`pkwit.cli.die` if a
:exc:`KwargvError` is raised, printing the exception text. Returns
*self* for convenience. | Below is the the instruction that describes the task:
### Input:
Like :meth:`ParseKeywords.parse`, but calls :func:`pkwit.cli.die` if a
:exc:`KwargvError` is raised, printing the exception text. Returns
*self* for convenience.
### Response:
def parse_or_die(self, args=None):
"""Like :meth:`ParseKeywords.parse`, but calls :func:`pkwit.cli.die` if a
:exc:`KwargvError` is raised, printing the exception text. Returns
*self* for convenience.
"""
from .cli import die
try:
return self.parse(args)
except KwargvError as e:
die(e) |
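
A self-contained sketch of the same "parse or die" pattern shown in the record above. The class, helper names and the toy parsing rules are invented; only the try/except-then-die shape mirrors the method, so treat this as an illustration rather than the library's real ParseKeywords API.

class KwargvError(Exception):
    pass

def die(exc):
    # mimic a CLI-style fatal exit carrying the exception text
    raise SystemExit('error: %s' % exc)

class Config(object):
    def parse(self, args=None):
        # toy parser: accept only "key=value" tokens
        for tok in (args or []):
            if '=' not in tok:
                raise KwargvError('malformed keyword argument %r' % tok)
            key, _, val = tok.partition('=')
            setattr(self, key, val)
        return self

    def parse_or_die(self, args=None):
        try:
            return self.parse(args)
        except KwargvError as e:
            die(e)

cfg = Config().parse_or_die(['nthreads=4', 'outdir=results'])
print(cfg.nthreads, cfg.outdir)   # 4 results
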
def copy(input, **params):
"""
Copies input or input's selected fields
:param input:
:param params:
:return: input
"""
PARAM_FIELDS = 'fields'
def filter_fields(obj, fields):
return {k:v for k,v in obj.items() if k in fields}
if PARAM_FIELDS in params:
fields = params.get(PARAM_FIELDS)
if isinstance(input, list):
res = []
for row in input:
res.append(filter_fields(row, fields))
return res
elif isinstance(input, dict):
return filter_fields(input, fields)
else:
raise NotImplementedError('{} is not supported'.format(type(input)))
else:
return input | Copies input or input's selected fields
:param input:
:param params:
:return: input | Below is the the instruction that describes the task:
### Input:
Copies input or input's selected fields
:param input:
:param params:
:return: input
### Response:
def copy(input, **params):
"""
Copies input or input's selected fields
:param input:
:param params:
:return: input
"""
PARAM_FIELDS = 'fields'
def filter_fields(obj, fields):
return {k:v for k,v in obj.items() if k in fields}
if PARAM_FIELDS in params:
fields = params.get(PARAM_FIELDS)
if isinstance(input, list):
res = []
for row in input:
res.append(filter_fields(row, fields))
return res
elif isinstance(input, dict):
return filter_fields(input, fields)
else:
raise NotImplementedError('{} is not supported'.format(type(input)))
else:
return input |
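
A quick usage sketch for the copy() helper above; the record values are made up and the snippet assumes copy() is in scope.

rows = [
    {'id': 1, 'name': 'alpha', 'secret': 'x'},
    {'id': 2, 'name': 'beta', 'secret': 'y'},
]
# keep only the selected fields from each row
print(copy(rows, fields=['id', 'name']))
# [{'id': 1, 'name': 'alpha'}, {'id': 2, 'name': 'beta'}]
# without a 'fields' parameter the input is returned unchanged
print(copy(rows[0]))
# {'id': 1, 'name': 'alpha', 'secret': 'x'}
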
def fs_obj_query_info(self, path, follow_symlinks):
"""Queries information about a file system object (file, directory, etc)
in the guest.
in path of type str
Path to the file system object to gather information about.
Guest path style.
in follow_symlinks of type bool
Information about symbolic links is returned if @c false. Otherwise,
symbolic links are followed and the returned information concerns
itself with the symlink target if @c true.
return info of type :class:`IGuestFsObjInfo`
:py:class:`IGuestFsObjInfo` object containing the information.
raises :class:`VBoxErrorObjectNotFound`
The file system object was not found.
raises :class:`VBoxErrorIprtError`
Error while querying information.
"""
if not isinstance(path, basestring):
raise TypeError("path can only be an instance of type basestring")
if not isinstance(follow_symlinks, bool):
raise TypeError("follow_symlinks can only be an instance of type bool")
info = self._call("fsObjQueryInfo",
in_p=[path, follow_symlinks])
info = IGuestFsObjInfo(info)
return info | Queries information about a file system object (file, directory, etc)
in the guest.
in path of type str
Path to the file system object to gather information about.
Guest path style.
in follow_symlinks of type bool
Information about symbolic links is returned if @c false. Otherwise,
symbolic links are followed and the returned information concerns
itself with the symlink target if @c true.
return info of type :class:`IGuestFsObjInfo`
:py:class:`IGuestFsObjInfo` object containing the information.
raises :class:`VBoxErrorObjectNotFound`
The file system object was not found.
raises :class:`VBoxErrorIprtError`
Error while querying information. | Below is the the instruction that describes the task:
### Input:
Queries information about a file system object (file, directory, etc)
in the guest.
in path of type str
Path to the file system object to gather information about.
Guest path style.
in follow_symlinks of type bool
Information about symbolic links is returned if @c false. Otherwise,
symbolic links are followed and the returned information concerns
itself with the symlink target if @c true.
return info of type :class:`IGuestFsObjInfo`
:py:class:`IGuestFsObjInfo` object containing the information.
raises :class:`VBoxErrorObjectNotFound`
The file system object was not found.
raises :class:`VBoxErrorIprtError`
Error while querying information.
### Response:
def fs_obj_query_info(self, path, follow_symlinks):
"""Queries information about a file system object (file, directory, etc)
in the guest.
in path of type str
Path to the file system object to gather information about.
Guest path style.
in follow_symlinks of type bool
Information about symbolic links is returned if @c false. Otherwise,
symbolic links are followed and the returned information concerns
itself with the symlink target if @c true.
return info of type :class:`IGuestFsObjInfo`
:py:class:`IGuestFsObjInfo` object containing the information.
raises :class:`VBoxErrorObjectNotFound`
The file system object was not found.
raises :class:`VBoxErrorIprtError`
Error while querying information.
"""
if not isinstance(path, basestring):
raise TypeError("path can only be an instance of type basestring")
if not isinstance(follow_symlinks, bool):
raise TypeError("follow_symlinks can only be an instance of type bool")
info = self._call("fsObjQueryInfo",
in_p=[path, follow_symlinks])
info = IGuestFsObjInfo(info)
return info |
def convert_values(args_list):
"""convert_value in bulk.
:param args_list: list of value, source, target currency pairs
:return: map of converted values
"""
rate_map = get_rates(map(itemgetter(1, 2), args_list))
value_map = {}
for value, source, target in args_list:
args = (value, source, target)
if source == target:
value_map[args] = value
else:
value_map[args] = value * rate_map[(source, target)]
return value_map | convert_value in bulk.
:param args_list: list of value, source, target currency pairs
:return: map of converted values | Below is the the instruction that describes the task:
### Input:
convert_value in bulk.
:param args_list: list of value, source, target currency pairs
:return: map of converted values
### Response:
def convert_values(args_list):
"""convert_value in bulk.
:param args_list: list of value, source, target currency pairs
:return: map of converted values
"""
rate_map = get_rates(map(itemgetter(1, 2), args_list))
value_map = {}
for value, source, target in args_list:
args = (value, source, target)
if source == target:
value_map[args] = value
else:
value_map[args] = value * rate_map[(source, target)]
return value_map |
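
A self-contained illustration of the same bulk-conversion logic, with a stand-in get_rates(); the real get_rates() presumably queries a rate source, and every number below is invented.

from operator import itemgetter

def get_rates(pairs):
    # invented fixed rates keyed by (source, target)
    table = {('USD', 'EUR'): 0.5, ('EUR', 'JPY'): 160.0}
    return {pair: table[pair] for pair in set(pairs) if pair[0] != pair[1]}

args_list = [(100, 'USD', 'EUR'), (50, 'EUR', 'JPY'), (10, 'USD', 'USD')]
rate_map = get_rates(map(itemgetter(1, 2), args_list))
value_map = {}
for value, source, target in args_list:
    key = (value, source, target)
    value_map[key] = value if source == target else value * rate_map[(source, target)]
print(value_map)
# {(100, 'USD', 'EUR'): 50.0, (50, 'EUR', 'JPY'): 8000.0, (10, 'USD', 'USD'): 10}
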
def explain_weights_dfs(estimator, **kwargs):
# type: (...) -> Dict[str, pd.DataFrame]
""" Explain weights and export them to a dict with ``pandas.DataFrame``
values (as :func:`eli5.formatters.as_dataframe.format_as_dataframes` does).
All keyword arguments are passed to :func:`eli5.explain_weights`.
Weights of all features are exported by default.
"""
kwargs = _set_defaults(kwargs)
return format_as_dataframes(
eli5.explain_weights(estimator, **kwargs)) | Explain weights and export them to a dict with ``pandas.DataFrame``
values (as :func:`eli5.formatters.as_dataframe.format_as_dataframes` does).
All keyword arguments are passed to :func:`eli5.explain_weights`.
Weights of all features are exported by default. | Below is the the instruction that describes the task:
### Input:
Explain weights and export them to a dict with ``pandas.DataFrame``
values (as :func:`eli5.formatters.as_dataframe.format_as_dataframes` does).
All keyword arguments are passed to :func:`eli5.explain_weights`.
Weights of all features are exported by default.
### Response:
def explain_weights_dfs(estimator, **kwargs):
# type: (...) -> Dict[str, pd.DataFrame]
""" Explain weights and export them to a dict with ``pandas.DataFrame``
values (as :func:`eli5.formatters.as_dataframe.format_as_dataframes` does).
All keyword arguments are passed to :func:`eli5.explain_weights`.
Weights of all features are exported by default.
"""
kwargs = _set_defaults(kwargs)
return format_as_dataframes(
eli5.explain_weights(estimator, **kwargs)) |
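
A hedged usage sketch: it assumes eli5, pandas and scikit-learn are installed and that this wrapper is exposed as eli5.explain_weights_dfs, as the import path in the record suggests.

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
import eli5

X, y = load_iris(return_X_y=True)
clf = LogisticRegression(max_iter=1000).fit(X, y)
dfs = eli5.explain_weights_dfs(clf)   # dict of DataFrames, e.g. a 'targets' table
for name, df in dfs.items():
    print(name, df.shape)
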
def zeq_magic(meas_file='measurements.txt', spec_file='',crd='s',input_dir_path='.', angle=0,
n_plots=5, save_plots=True, fmt="svg", interactive=False, specimen="",
samp_file='samples.txt', contribution=None,fignum=1):
"""
zeq_magic makes zijderveld and equal area plots for magic formatted measurements files.
Parameters
----------
meas_file : str
input measurement file
spec_file : str
input specimen interpretation file
samp_file : str
input sample orientations file
crd : str
coordinate system [s,g,t] for specimen, geographic, tilt corrected
g,t options require a sample file with specimen and bedding orientation
input_dir_path : str
input directory of meas_file, default "."
angle : float
angle of X direction with respect to specimen X
n_plots : int, default 5
maximum number of plots to make
if you want to make all possible plots, specify "all"
save_plots : bool, default True
if True, create and save all requested plots
fmt : str, default "svg"
format for figures, [svg, jpg, pdf, png]
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line only)
specimen : str, default ""
specimen name to plot
samp_file : str, default 'samples.txt'
name of samples file
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
fignum : matplotlib figure number
"""
def plot_interpretations(ZED, spec_container, this_specimen, this_specimen_measurements, datablock):
if cb.is_null(spec_container) or cb.is_null(this_specimen_measurements) or cb.is_null(datablock):
return ZED
if 'method_codes' not in spec_container.df.columns:
return ZED
prior_spec_data = spec_container.get_records_for_code(
'LP-DIR', strict_match=False) # look up all prior directional interpretations
prior_specimen_interpretations=[]
if not len(prior_spec_data):
return ZED
mpars = {"specimen_direction_type": "Error"}
if len(prior_spec_data):
prior_specimen_interpretations = prior_spec_data[prior_spec_data['specimen'].astype(str) == this_specimen] #.str.match(this_specimen) == True]
if len(prior_specimen_interpretations):
if len(prior_specimen_interpretations)>0:
beg_pcas = pd.to_numeric(
prior_specimen_interpretations.meas_step_min.values).tolist()
end_pcas = pd.to_numeric(
prior_specimen_interpretations.meas_step_max.values).tolist()
spec_methods = prior_specimen_interpretations.method_codes.tolist()
# step through all prior interpretations and plot them
for ind in range(len(beg_pcas)):
spec_meths = spec_methods[ind].split(':')
for m in spec_meths:
if 'DE-BFL' in m:
calculation_type = 'DE-BFL' # best fit line
if 'DE-BFP' in m:
calculation_type = 'DE-BFP' # best fit plane
if 'DE-FM' in m:
calculation_type = 'DE-FM' # fisher mean
if 'DE-BFL-A' in m:
calculation_type = 'DE-BFL-A' # anchored best fit line
treatments = pd.to_numeric(this_specimen_measurements.treatment).tolist()
if len(beg_pcas)!=0:
try:
# getting the starting and ending points
start, end = treatments.index(beg_pcas[ind]), treatments.index(end_pcas[ind])
mpars = pmag.domean(
datablock, start, end, calculation_type)
except ValueError as ex:
mpars['specimen_direction_type'] = "Error"
try:
if beg_pcas[ind] == 0:
start = 0
else:
start = treatments.index(beg_pcas[ind])
if end_pcas[ind] == 0:
end = 0
else:
end = treatments.index(end_pcas[ind])
mpars = pmag.domean(
datablock, start, end, calculation_type)
except ValueError:
mpars['specimen_direction_type'] = "Error"
# calculate direction/plane
if mpars["specimen_direction_type"] != "Error":
# put it on the plot
pmagplotlib.plot_dir(ZED, mpars, datablock, angle)
#if interactive:
# pmagplotlib.draw_figs(ZED)
else:
print('\n-W- Specimen {} record contains invalid start/stop bounds:'.format(this_specimen))
print(prior_spec_data.loc[this_specimen][['meas_step_min', 'meas_step_max']])
print('\n Measurement records:')
cols = list(set(['treat_ac_field', 'treat_temp']).intersection(this_specimen_measurements.columns))
print(this_specimen_measurements[cols])
print('\n Data will be plotted without interpretations\n')
return ZED
def make_plots(spec, cnt, meas_df, spec_container, samp_container=None):
# get sample data for orientation
if spec_container:
try:
samps = spec_container.df.loc[spec, 'sample']
except KeyError:
samps = ""
samp_df = []
if isinstance(samps, int) or isinstance(samps, float) or isinstance(samps, np.int64):
if np.isnan(samps):
samp = ""
samp_df = []
else:
samp = str(samps)
samp_container.df.index = samp_container.df.index.astype(str)
samp_df = samp_container.df[samp_container.df.index == samp]
elif isinstance(samps, type(None)):
samp = ""
samp_df = []
elif len(samps):
if isinstance(samps, str):
samp = samps
else:
samp = samps.iloc[0]
samp_df = samp_container.df[samp_container.df.index == samp]
else:
samp_df = []
# we can make the figure dictionary that pmagplotlib likes:
ZED = {'eqarea': cnt, 'zijd': cnt+1, 'demag': cnt+2} # make datablock
# get the relevant data
spec_df = meas_df[meas_df.specimen == s]
# remove ARM data
spec_df = spec_df[- spec_df.method_codes.str.contains(
'LP-*[\w]*-ARM')]
# split data into NRM, thermal, and af dataframes
spec_df_nrm = spec_df[spec_df.method_codes.str.contains(
'LT-NO')] # get the NRM data
spec_df_th = spec_df[spec_df.method_codes.str.contains(
'LT-T-Z')] # zero field thermal demag steps
try:
cond = spec_df.method_codes.str.contains('(^|[\s\:])LT-PTRM')
spec_df_th = spec_df_th[-cond] # get rid of some pTRM steps
except ValueError:
keep_inds = []
n = 0
for ind, row in spec_df_th.copy().iterrows():
if 'LT-PTRM' in row['method_codes'] and 'ALT-PTRM' not in row['method_codes']:
keep_inds.append(n)
else:
pass
n += 1
if len(keep_inds) < n:
spec_df_th = spec_df_th.iloc[keep_inds]
spec_df_af = spec_df[spec_df.method_codes.str.contains('LT-AF-Z')]
this_spec_meas_df = None
datablock = None
if (not len(spec_df_th.index) > 1) and (not len(spec_df_af.index) > 1):
return
if len(spec_df_th.index) > 1: # this is a thermal run
this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_th])
# make sure all decs/incs are filled in
n_rows = len(this_spec_meas_df)
this_spec_meas_df = this_spec_meas_df.dropna(how='any', subset=['dir_dec', 'dir_inc', 'magn_moment'])
if n_rows > len(this_spec_meas_df):
print('-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'.format(s, n_rows - len(this_spec_meas_df)))
# geographic transformation
if coord != "-1" and len(samp_df):
this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord)
units = 'K' # units are kelvin
try:
this_spec_meas_df['magn_moment'] = this_spec_meas_df['magn_moment'].astype(float)
this_spec_meas_df['treat_temp'] = this_spec_meas_df['treat_temp'].astype(float)
except:
print('-W- There are malformed or missing data for specimen {}, skipping'.format(spec))
return
datablock = this_spec_meas_df[['treat_temp', 'dir_dec', 'dir_inc',
'magn_moment', 'blank', 'quality']].values.tolist()
ZED = pmagplotlib.plot_zed(ZED, datablock, angle, s, units)
if len(spec_df_af.index) > 1: # this is an af run
this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_af])
# make sure all decs/incs are filled in
n_rows = len(this_spec_meas_df)
this_spec_meas_df = this_spec_meas_df.dropna(how='any', subset=['dir_dec', 'dir_inc', 'magn_moment'])
if n_rows > len(this_spec_meas_df):
print('-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'.format(s, n_rows - len(this_spec_meas_df)))
# geographic transformation
if coord != "-1" and len(samp_df):
this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord)
units = 'T' # these are AF data
try:
this_spec_meas_df['magn_moment'] = this_spec_meas_df['magn_moment'].astype(float)
this_spec_meas_df['treat_ac_field'] = this_spec_meas_df['treat_ac_field'].astype(float)
except:
print('-W- There are malformed or missing data for specimen {}, skipping'.format(spec))
return
datablock = this_spec_meas_df[['treat_ac_field', 'dir_dec', 'dir_inc',
'magn_moment', 'blank', 'quality']].values.tolist()
ZED = pmagplotlib.plot_zed(ZED, datablock, angle, s, units)
return plot_interpretations(ZED, spec_container, s, this_spec_meas_df, datablock)
if interactive:
save_plots = False
# read in MagIC formatted data if contribution object not provided
if not isinstance(contribution, cb.Contribution):
input_dir_path = os.path.realpath(input_dir_path)
file_path = pmag.resolve_file_name(meas_file, input_dir_path)
# read in magic formatted data
if not os.path.exists(file_path):
print('No such file:', file_path)
return False, []
custom_filenames = {'measurements': file_path, 'specimens': spec_file, 'samples': samp_file}
contribution = cb.Contribution(input_dir_path, custom_filenames=custom_filenames,
read_tables=['measurements', 'specimens',
'contribution', 'samples'])
if pmagplotlib.isServer:
try:
contribution.propagate_location_to_samples()
contribution.propagate_location_to_specimens()
contribution.propagate_location_to_measurements()
except KeyError as ex:
pass
meas_container = contribution.tables['measurements']
meas_df = contribution.tables['measurements'].df #
#meas_df=pd.read_csv(file_path, sep='\t', header=1)
spec_container = contribution.tables.get('specimens', None)
samp_container = contribution.tables.get('samples', None)
#if not spec_file:
# spec_file = os.path.join(os.path.split(file_path)[0], "specimens.txt")
#if os.path.exists(spec_file):
# spec_container = cb.MagicDataFrame(spec_file, dtype="specimens")
#else:
# spec_container = None
meas_df['blank'] = "" # this is a dummy variable expected by plotZED
if 'treat_ac_field' in meas_df.columns:
# create 'treatment' column.
# uses treat_temp if treat_ac_field is missing OR zero.
# (have to take this into account for plotting later)
if 'treat_temp' in meas_df.columns:
meas_df['treatment'] = meas_df['treat_ac_field'].where(
cond=meas_df['treat_ac_field'].astype(bool), other=meas_df['treat_temp'])
else:
meas_df['treatment'] = meas_df['treat_ac_field']
else:
meas_df['treatment'] = meas_df['treat_temp']
if crd == "s":
coord = "-1"
elif crd == "t":
coord = "100"
else:
coord = "0"
specimens = meas_df.specimen.unique() # list of specimen names
if len(specimens) == 0:
print('there are no data for plotting')
return False, []
# check measurement table for req'd fields
missing = []
reqd_cols_present = meas_df.columns.intersection(['dir_dec', 'dir_inc', 'magn_moment'])
for col in ['dir_dec', 'dir_inc', 'magn_moment']:
if col not in reqd_cols_present:
missing.append(col)
if missing:
print('-W- Missing required column(s) {}, cannot run zeq_magic'.format(', '.join(missing)))
return False, []
cnt = fignum
if n_plots != "all":
if len(specimens) > n_plots:
specimens = specimens[:n_plots]
saved = []
if specimen:
specimens = [specimen]
for s in specimens:
ZED = make_plots(s, cnt, meas_df, spec_container, samp_container)
if not ZED:
if pmagplotlib.verbose:
print('No plots could be created for specimen:', s)
continue
titles = {key: s + "_" + key + "." + fmt for key in ZED}
if pmagplotlib.isServer:
titles = {}
titles['eqarea'] = 'Equal Area Plot'
titles['zijd'] = 'Zijderveld Plot'
titles['demag'] = 'Demagnetization Plot'
con_id = ""
if 'contribution' in contribution.tables:
if 'id' in contribution.tables['contribution'].df.columns:
con_id = contribution.tables['contribution'].df['id'].values[0]
pmagplotlib.add_borders(ZED, titles, con_id=con_id)
for title in titles:
# try to get the full hierarchy for plot names
df_slice = meas_container.df[meas_container.df['specimen'] == s]
location = str(meas_container.get_name('location', df_slice))
site = str(meas_container.get_name('site', df_slice))
sample = str(meas_container.get_name('sample', df_slice))
# add coord here!
filename = 'LO:_'+location+'_SI:_'+site+'_SA:_'+sample + \
'_SP:_'+str(s)+'_CO:_' + '_TY:_'+title+'_.png'
titles[title] = filename
if save_plots:
saved.extend(pmagplotlib.save_plots(ZED, titles))
elif interactive:
pmagplotlib.draw_figs(ZED)
ans = pmagplotlib.save_or_quit()
if ans == 'a':
saved.extend(pmagplotlib.save_plots(ZED, titles))
else:
continue
else:
cnt += 3
return True, saved | zeq_magic makes zijderveld and equal area plots for magic formatted measurements files.
Parameters
----------
meas_file : str
input measurement file
spec_file : str
input specimen interpretation file
samp_file : str
input sample orientations file
crd : str
coordinate system [s,g,t] for specimen, geographic, tilt corrected
g,t options require a sample file with specimen and bedding orientation
input_dir_path : str
input directory of meas_file, default "."
angle : float
angle of X direction with respect to specimen X
n_plots : int, default 5
maximum number of plots to make
if you want to make all possible plots, specify "all"
save_plots : bool, default True
if True, create and save all requested plots
fmt : str, default "svg"
format for figures, [svg, jpg, pdf, png]
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line only)
specimen : str, default ""
specimen name to plot
samp_file : str, default 'samples.txt'
name of samples file
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
fignum : matplotlib figure number | Below is the the instruction that describes the task:
### Input:
zeq_magic makes zijderveld and equal area plots for magic formatted measurements files.
Parameters
----------
meas_file : str
input measurement file
spec_file : str
input specimen interpretation file
samp_file : str
input sample orientations file
crd : str
coordinate system [s,g,t] for specimen, geographic, tilt corrected
g,t options require a sample file with specimen and bedding orientation
input_dir_path : str
input directory of meas_file, default "."
angle : float
angle of X direction with respect to specimen X
n_plots : int, default 5
maximum number of plots to make
if you want to make all possible plots, specify "all"
save_plots : bool, default True
if True, create and save all requested plots
fmt : str, default "svg"
format for figures, [svg, jpg, pdf, png]
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line only)
specimen : str, default ""
specimen name to plot
samp_file : str, default 'samples.txt'
name of samples file
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
fignum : matplotlib figure number
### Response:
def zeq_magic(meas_file='measurements.txt', spec_file='',crd='s',input_dir_path='.', angle=0,
n_plots=5, save_plots=True, fmt="svg", interactive=False, specimen="",
samp_file='samples.txt', contribution=None,fignum=1):
"""
zeq_magic makes zijderveld and equal area plots for magic formatted measurements files.
Parameters
----------
meas_file : str
input measurement file
spec_file : str
input specimen interpretation file
samp_file : str
input sample orientations file
crd : str
coordinate system [s,g,t] for specimen, geographic, tilt corrected
g,t options require a sample file with specimen and bedding orientation
input_dir_path : str
input directory of meas_file, default "."
angle : float
angle of X direction with respect to specimen X
n_plots : int, default 5
maximum number of plots to make
if you want to make all possible plots, specify "all"
save_plots : bool, default True
if True, create and save all requested plots
fmt : str, default "svg"
format for figures, [svg, jpg, pdf, png]
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line only)
specimen : str, default ""
specimen name to plot
samp_file : str, default 'samples.txt'
name of samples file
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
fignum : matplotlib figure number
"""
def plot_interpretations(ZED, spec_container, this_specimen, this_specimen_measurements, datablock):
if cb.is_null(spec_container) or cb.is_null(this_specimen_measurements) or cb.is_null(datablock):
return ZED
if 'method_codes' not in spec_container.df.columns:
return ZED
prior_spec_data = spec_container.get_records_for_code(
'LP-DIR', strict_match=False) # look up all prior directional interpretations
prior_specimen_interpretations=[]
if not len(prior_spec_data):
return ZED
mpars = {"specimen_direction_type": "Error"}
if len(prior_spec_data):
prior_specimen_interpretations = prior_spec_data[prior_spec_data['specimen'].astype(str) == this_specimen] #.str.match(this_specimen) == True]
if len(prior_specimen_interpretations):
if len(prior_specimen_interpretations)>0:
beg_pcas = pd.to_numeric(
prior_specimen_interpretations.meas_step_min.values).tolist()
end_pcas = pd.to_numeric(
prior_specimen_interpretations.meas_step_max.values).tolist()
spec_methods = prior_specimen_interpretations.method_codes.tolist()
# step through all prior interpretations and plot them
for ind in range(len(beg_pcas)):
spec_meths = spec_methods[ind].split(':')
for m in spec_meths:
if 'DE-BFL' in m:
calculation_type = 'DE-BFL' # best fit line
if 'DE-BFP' in m:
calculation_type = 'DE-BFP' # best fit plane
if 'DE-FM' in m:
calculation_type = 'DE-FM' # fisher mean
if 'DE-BFL-A' in m:
calculation_type = 'DE-BFL-A' # anchored best fit line
treatments = pd.to_numeric(this_specimen_measurements.treatment).tolist()
if len(beg_pcas)!=0:
try:
# getting the starting and ending points
start, end = treatments.index(beg_pcas[ind]), treatments.index(end_pcas[ind])
mpars = pmag.domean(
datablock, start, end, calculation_type)
except ValueError as ex:
mpars['specimen_direction_type'] = "Error"
try:
if beg_pcas[ind] == 0:
start = 0
else:
start = treatments.index(beg_pcas[ind])
if end_pcas[ind] == 0:
end = 0
else:
end = treatments.index(end_pcas[ind])
mpars = pmag.domean(
datablock, start, end, calculation_type)
except ValueError:
mpars['specimen_direction_type'] = "Error"
# calculate direction/plane
if mpars["specimen_direction_type"] != "Error":
# put it on the plot
pmagplotlib.plot_dir(ZED, mpars, datablock, angle)
#if interactive:
# pmagplotlib.draw_figs(ZED)
else:
print('\n-W- Specimen {} record contains invalid start/stop bounds:'.format(this_specimen))
print(prior_spec_data.loc[this_specimen][['meas_step_min', 'meas_step_max']])
print('\n Measurement records:')
cols = list(set(['treat_ac_field', 'treat_temp']).intersection(this_specimen_measurements.columns))
print(this_specimen_measurements[cols])
print('\n Data will be plotted without interpretations\n')
return ZED
def make_plots(spec, cnt, meas_df, spec_container, samp_container=None):
# get sample data for orientation
if spec_container:
try:
samps = spec_container.df.loc[spec, 'sample']
except KeyError:
samps = ""
samp_df = []
if isinstance(samps, int) or isinstance(samps, float) or isinstance(samps, np.int64):
if np.isnan(samps):
samp = ""
samp_df = []
else:
samp = str(samps)
samp_container.df.index = samp_container.df.index.astype(str)
samp_df = samp_container.df[samp_container.df.index == samp]
elif isinstance(samps, type(None)):
samp = ""
samp_df = []
elif len(samps):
if isinstance(samps, str):
samp = samps
else:
samp = samps.iloc[0]
samp_df = samp_container.df[samp_container.df.index == samp]
else:
samp_df = []
# we can make the figure dictionary that pmagplotlib likes:
ZED = {'eqarea': cnt, 'zijd': cnt+1, 'demag': cnt+2} # make datablock
# get the relevant data
spec_df = meas_df[meas_df.specimen == s]
# remove ARM data
spec_df = spec_df[- spec_df.method_codes.str.contains(
'LP-*[\w]*-ARM')]
# split data into NRM, thermal, and af dataframes
spec_df_nrm = spec_df[spec_df.method_codes.str.contains(
'LT-NO')] # get the NRM data
spec_df_th = spec_df[spec_df.method_codes.str.contains(
'LT-T-Z')] # zero field thermal demag steps
try:
cond = spec_df.method_codes.str.contains('(^|[\s\:])LT-PTRM')
spec_df_th = spec_df_th[-cond] # get rid of some pTRM steps
except ValueError:
keep_inds = []
n = 0
for ind, row in spec_df_th.copy().iterrows():
if 'LT-PTRM' in row['method_codes'] and 'ALT-PTRM' not in row['method_codes']:
keep_inds.append(n)
else:
pass
n += 1
if len(keep_inds) < n:
spec_df_th = spec_df_th.iloc[keep_inds]
spec_df_af = spec_df[spec_df.method_codes.str.contains('LT-AF-Z')]
this_spec_meas_df = None
datablock = None
if (not len(spec_df_th.index) > 1) and (not len(spec_df_af.index) > 1):
return
if len(spec_df_th.index) > 1: # this is a thermal run
this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_th])
# make sure all decs/incs are filled in
n_rows = len(this_spec_meas_df)
this_spec_meas_df = this_spec_meas_df.dropna(how='any', subset=['dir_dec', 'dir_inc', 'magn_moment'])
if n_rows > len(this_spec_meas_df):
print('-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'.format(s, n_rows - len(this_spec_meas_df)))
# geographic transformation
if coord != "-1" and len(samp_df):
this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord)
units = 'K' # units are kelvin
try:
this_spec_meas_df['magn_moment'] = this_spec_meas_df['magn_moment'].astype(float)
this_spec_meas_df['treat_temp'] = this_spec_meas_df['treat_temp'].astype(float)
except:
print('-W- There are malformed or missing data for specimen {}, skipping'.format(spec))
return
datablock = this_spec_meas_df[['treat_temp', 'dir_dec', 'dir_inc',
'magn_moment', 'blank', 'quality']].values.tolist()
ZED = pmagplotlib.plot_zed(ZED, datablock, angle, s, units)
if len(spec_df_af.index) > 1: # this is an af run
this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_af])
# make sure all decs/incs are filled in
n_rows = len(this_spec_meas_df)
this_spec_meas_df = this_spec_meas_df.dropna(how='any', subset=['dir_dec', 'dir_inc', 'magn_moment'])
if n_rows > len(this_spec_meas_df):
print('-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'.format(s, n_rows - len(this_spec_meas_df)))
# geographic transformation
if coord != "-1" and len(samp_df):
this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord)
units = 'T' # these are AF data
try:
this_spec_meas_df['magn_moment'] = this_spec_meas_df['magn_moment'].astype(float)
this_spec_meas_df['treat_ac_field'] = this_spec_meas_df['treat_ac_field'].astype(float)
except:
print('-W- There are malformed or missing data for specimen {}, skipping'.format(spec))
return
datablock = this_spec_meas_df[['treat_ac_field', 'dir_dec', 'dir_inc',
'magn_moment', 'blank', 'quality']].values.tolist()
ZED = pmagplotlib.plot_zed(ZED, datablock, angle, s, units)
return plot_interpretations(ZED, spec_container, s, this_spec_meas_df, datablock)
if interactive:
save_plots = False
# read in MagIC formatted data if contribution object not provided
if not isinstance(contribution, cb.Contribution):
input_dir_path = os.path.realpath(input_dir_path)
file_path = pmag.resolve_file_name(meas_file, input_dir_path)
# read in magic formatted data
if not os.path.exists(file_path):
print('No such file:', file_path)
return False, []
custom_filenames = {'measurements': file_path, 'specimens': spec_file, 'samples': samp_file}
contribution = cb.Contribution(input_dir_path, custom_filenames=custom_filenames,
read_tables=['measurements', 'specimens',
'contribution', 'samples'])
if pmagplotlib.isServer:
try:
contribution.propagate_location_to_samples()
contribution.propagate_location_to_specimens()
contribution.propagate_location_to_measurements()
except KeyError as ex:
pass
meas_container = contribution.tables['measurements']
meas_df = contribution.tables['measurements'].df #
#meas_df=pd.read_csv(file_path, sep='\t', header=1)
spec_container = contribution.tables.get('specimens', None)
samp_container = contribution.tables.get('samples', None)
#if not spec_file:
# spec_file = os.path.join(os.path.split(file_path)[0], "specimens.txt")
#if os.path.exists(spec_file):
# spec_container = cb.MagicDataFrame(spec_file, dtype="specimens")
#else:
# spec_container = None
meas_df['blank'] = "" # this is a dummy variable expected by plotZED
if 'treat_ac_field' in meas_df.columns:
# create 'treatment' column.
# uses treat_temp if treat_ac_field is missing OR zero.
# (have to take this into account for plotting later)
if 'treat_temp' in meas_df.columns:
meas_df['treatment'] = meas_df['treat_ac_field'].where(
cond=meas_df['treat_ac_field'].astype(bool), other=meas_df['treat_temp'])
else:
meas_df['treatment'] = meas_df['treat_ac_field']
else:
meas_df['treatment'] = meas_df['treat_temp']
if crd == "s":
coord = "-1"
elif crd == "t":
coord = "100"
else:
coord = "0"
specimens = meas_df.specimen.unique() # list of specimen names
if len(specimens) == 0:
print('there are no data for plotting')
return False, []
# check measurement table for req'd fields
missing = []
reqd_cols_present = meas_df.columns.intersection(['dir_dec', 'dir_inc', 'magn_moment'])
for col in ['dir_dec', 'dir_inc', 'magn_moment']:
if col not in reqd_cols_present:
missing.append(col)
if missing:
print('-W- Missing required column(s) {}, cannot run zeq_magic'.format(', '.join(missing)))
return False, []
cnt = fignum
if n_plots != "all":
if len(specimens) > n_plots:
specimens = specimens[:n_plots]
saved = []
if specimen:
specimens = [specimen]
for s in specimens:
ZED = make_plots(s, cnt, meas_df, spec_container, samp_container)
if not ZED:
if pmagplotlib.verbose:
print('No plots could be created for specimen:', s)
continue
titles = {key: s + "_" + key + "." + fmt for key in ZED}
if pmagplotlib.isServer:
titles = {}
titles['eqarea'] = 'Equal Area Plot'
titles['zijd'] = 'Zijderveld Plot'
titles['demag'] = 'Demagnetization Plot'
con_id = ""
if 'contribution' in contribution.tables:
if 'id' in contribution.tables['contribution'].df.columns:
con_id = contribution.tables['contribution'].df['id'].values[0]
pmagplotlib.add_borders(ZED, titles, con_id=con_id)
for title in titles:
# try to get the full hierarchy for plot names
df_slice = meas_container.df[meas_container.df['specimen'] == s]
location = str(meas_container.get_name('location', df_slice))
site = str(meas_container.get_name('site', df_slice))
sample = str(meas_container.get_name('sample', df_slice))
# add coord here!
filename = 'LO:_'+location+'_SI:_'+site+'_SA:_'+sample + \
'_SP:_'+str(s)+'_CO:_' + '_TY:_'+title+'_.png'
titles[title] = filename
if save_plots:
saved.extend(pmagplotlib.save_plots(ZED, titles))
elif interactive:
pmagplotlib.draw_figs(ZED)
ans = pmagplotlib.save_or_quit()
if ans == 'a':
saved.extend(pmagplotlib.save_plots(ZED, titles))
else:
continue
else:
cnt += 3
return True, saved |
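
A hedged call sketch for the function above; the file and directory names are placeholders, and it assumes a MagIC-format project directory laid out as the docstring describes (geographic coordinates need the samples file).

ok, saved = zeq_magic(meas_file='measurements.txt', spec_file='specimens.txt',
                      samp_file='samples.txt', crd='g', input_dir_path='my_project',
                      n_plots=3, save_plots=True, fmt='svg')
if ok:
    print('saved figures:', saved)   # list of file names written by save_plots
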
def update(self, dist):
"""
Adds the given distribution's counts to the current distribution.
"""
assert isinstance(dist, DDist)
for k, c in iteritems(dist.counts):
self.counts[k] += c
self.total += dist.total | Adds the given distribution's counts to the current distribution. | Below is the the instruction that describes the task:
### Input:
Adds the given distribution's counts to the current distribution.
### Response:
def update(self, dist):
"""
Adds the given distribution's counts to the current distribution.
"""
assert isinstance(dist, DDist)
for k, c in iteritems(dist.counts):
self.counts[k] += c
self.total += dist.total |
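
A stripped-down, runnable sketch of the merge behaviour. Only the two attributes update() touches (counts and total) are modelled here, and dict.items() stands in for the six/Python-2 iteritems helper; the real DDist class presumably carries more state.

from collections import defaultdict

class DDist(object):
    def __init__(self):
        self.counts = defaultdict(int)
        self.total = 0

    def add(self, k, c=1):
        self.counts[k] += c
        self.total += c

    def update(self, dist):
        assert isinstance(dist, DDist)
        for k, c in dist.counts.items():
            self.counts[k] += c
        self.total += dist.total

a, b = DDist(), DDist()
a.add('yes', 3); a.add('no', 1)
b.add('yes', 2)
a.update(b)
print(dict(a.counts), a.total)   # {'yes': 5, 'no': 1} 6
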
def reftag_to_cls(fn):
"""
decorator that checks function arguments for `concrete` and `resource`
and will properly set them to class references if a string (reftag) is
passed as the value
"""
names, _, _, values = inspect.getargspec(fn)
@wraps(fn)
def wrapped(*args, **kwargs):
i = 0
backend = args[0]
for name in names[1:]:
value = args[i]
if name == "concrete" and isinstance(value, six.string_types):
args[i] = backend.REFTAG_CONCRETE[value]
elif name == "resource" and isinstance(value, six.string_types):
args[i] = backend.REFTAG_RESOURCE[value]
i += 1
return fn(*args, **kwargs)
return wrapped | decorator that checks function arguments for `concrete` and `resource`
and will properly set them to class references if a string (reftag) is
passed as the value | Below is the the instruction that describes the task:
### Input:
decorator that checks function arguments for `concrete` and `resource`
and will properly set them to class references if a string (reftag) is
passed as the value
### Response:
def reftag_to_cls(fn):
"""
decorator that checks function arguments for `concrete` and `resource`
and will properly set them to class references if a string (reftag) is
passed as the value
"""
names, _, _, values = inspect.getargspec(fn)
@wraps(fn)
def wrapped(*args, **kwargs):
i = 0
backend = args[0]
for name in names[1:]:
value = args[i]
if name == "concrete" and isinstance(value, six.string_types):
args[i] = backend.REFTAG_CONCRETE[value]
elif name == "resource" and isinstance(value, six.string_types):
args[i] = backend.REFTAG_RESOURCE[value]
i += 1
return fn(*args, **kwargs)
return wrapped |
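
A simplified, self-contained sketch of the same idea: a decorator that swaps string "reftags" for classes before calling the wrapped function. The class names, reftag values and the single REFTAG_RESOURCE map are invented, and the sketch copies args to a list so the in-place substitution is legal on a tuple of positional arguments.

from functools import wraps
import inspect

class Network(object):
    pass

class Facility(object):
    pass

class Backend(object):
    # invented reftag -> class map; the real maps come from the library
    REFTAG_RESOURCE = {"net": Network, "fac": Facility}

    def get_objects(self, resource, limit=10):
        return resource, limit

def reftag_to_cls_demo(fn):
    names = inspect.getfullargspec(fn).args

    @wraps(fn)
    def wrapped(*args, **kwargs):
        args = list(args)            # tuples are immutable, so copy first
        backend = args[0]
        for i, name in enumerate(names):
            if i >= len(args):
                break
            if name == "resource" and isinstance(args[i], str):
                args[i] = backend.REFTAG_RESOURCE[args[i]]
        return fn(*args, **kwargs)
    return wrapped

Backend.get_objects = reftag_to_cls_demo(Backend.get_objects)
print(Backend().get_objects("net"))   # (<class '...Network'>, 10)
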
def predict_proba(self, p):
""" Calculate the calibrated probabilities
Parameters
----------
y_prob : array-like of shape = [n_samples, 2]
Predicted probabilities to be calibrated using calibration map
Returns
-------
y_prob_cal : array-like of shape = [n_samples, 1]
Predicted calibrated probabilities
"""
# TODO: Check input
if p.size != p.shape[0]:
p = p[:, 1]
calibrated_proba = np.zeros(p.shape[0])
for i in range(self.calibration_map.shape[0]):
calibrated_proba[np.logical_and(self.calibration_map[i, 1] <= p, self.calibration_map[i, 0] > p)] = \
self.calibration_map[i, 2]
# TODO: return 2D and refactor
return calibrated_proba | Calculate the calibrated probabilities
Parameters
----------
y_prob : array-like of shape = [n_samples, 2]
Predicted probabilities to be calibrated using calibration map
Returns
-------
y_prob_cal : array-like of shape = [n_samples, 1]
Predicted calibrated probabilities | Below is the the instruction that describes the task:
### Input:
Calculate the calibrated probabilities
Parameters
----------
y_prob : array-like of shape = [n_samples, 2]
Predicted probabilities to be calibrated using calibration map
Returns
-------
y_prob_cal : array-like of shape = [n_samples, 1]
Predicted calibrated probabilities
### Response:
def predict_proba(self, p):
""" Calculate the calibrated probabilities
Parameters
----------
y_prob : array-like of shape = [n_samples, 2]
Predicted probabilities to be calibrated using calibration map
Returns
-------
y_prob_cal : array-like of shape = [n_samples, 1]
Predicted calibrated probabilities
"""
# TODO: Check input
if p.size != p.shape[0]:
p = p[:, 1]
calibrated_proba = np.zeros(p.shape[0])
for i in range(self.calibration_map.shape[0]):
calibrated_proba[np.logical_and(self.calibration_map[i, 1] <= p, self.calibration_map[i, 0] > p)] = \
self.calibration_map[i, 2]
# TODO: return 2D and refactor
return calibrated_proba |
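
A standalone illustration of the bin lookup, with an invented calibration_map whose rows read [bin_upper, bin_lower, calibrated_value]; that column order is what the comparison in the method implies.

import numpy as np

calibration_map = np.array([
    [0.5, 0.0, 0.10],    # raw p in [0.0, 0.5)  -> 0.10
    [1.01, 0.5, 0.80],   # raw p in [0.5, 1.01) -> 0.80
])

p = np.array([0.2, 0.6, 0.95])
calibrated = np.zeros(p.shape[0])
for i in range(calibration_map.shape[0]):
    in_bin = np.logical_and(calibration_map[i, 1] <= p, calibration_map[i, 0] > p)
    calibrated[in_bin] = calibration_map[i, 2]
print(calibrated)   # [0.1 0.8 0.8]
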
def setup_scrollarea(self):
"""Setup the scrollarea that will contain the FigureThumbnails."""
self.view = QWidget()
self.scene = QGridLayout(self.view)
self.scene.setColumnStretch(0, 100)
self.scene.setColumnStretch(2, 100)
self.scrollarea = QScrollArea()
self.scrollarea.setWidget(self.view)
self.scrollarea.setWidgetResizable(True)
self.scrollarea.setFrameStyle(0)
self.scrollarea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.scrollarea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.scrollarea.setSizePolicy(QSizePolicy(QSizePolicy.Ignored,
QSizePolicy.Preferred))
# Set the vertical scrollbar explicitly:
# This is required to avoid a "RuntimeError: no access to protected
# functions or signals for objects not created from Python" in Linux.
self.scrollarea.setVerticalScrollBar(QScrollBar())
return self.scrollarea | Setup the scrollarea that will contain the FigureThumbnails. | Below is the the instruction that describes the task:
### Input:
Setup the scrollarea that will contain the FigureThumbnails.
### Response:
def setup_scrollarea(self):
"""Setup the scrollarea that will contain the FigureThumbnails."""
self.view = QWidget()
self.scene = QGridLayout(self.view)
self.scene.setColumnStretch(0, 100)
self.scene.setColumnStretch(2, 100)
self.scrollarea = QScrollArea()
self.scrollarea.setWidget(self.view)
self.scrollarea.setWidgetResizable(True)
self.scrollarea.setFrameStyle(0)
self.scrollarea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.scrollarea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.scrollarea.setSizePolicy(QSizePolicy(QSizePolicy.Ignored,
QSizePolicy.Preferred))
# Set the vertical scrollbar explicitely :
# This is required to avoid a "RuntimeError: no access to protected
# functions or signals for objects not created from Python" in Linux.
self.scrollarea.setVerticalScrollBar(QScrollBar())
return self.scrollarea |
def handle(self, *args, **options):
"""
Making it happen.
"""
logger.info("Build started")
# Set options
self.set_options(*args, **options)
# Get the build directory ready
if not options.get("keep_build_dir"):
self.init_build_dir()
# Build up static files
if not options.get("skip_static"):
self.build_static()
# Build the media directory
if not options.get("skip_media"):
self.build_media()
# Build views
self.build_views()
# Close out
logger.info("Build finished") | Making it happen. | Below is the the instruction that describes the task:
### Input:
Making it happen.
### Response:
def handle(self, *args, **options):
"""
Making it happen.
"""
logger.info("Build started")
# Set options
self.set_options(*args, **options)
# Get the build directory ready
if not options.get("keep_build_dir"):
self.init_build_dir()
# Build up static files
if not options.get("skip_static"):
self.build_static()
# Build the media directory
if not options.get("skip_media"):
self.build_media()
# Build views
self.build_views()
# Close out
logger.info("Build finished") |
def read_data(archive, arc_type, day, stachans, length=86400):
"""
Function to read the appropriate data from an archive for a day.
:type archive: str
:param archive:
The archive source - if arc_type is seishub, this should be a url,
if the arc_type is FDSN then this can be either a url or a known obspy
client. If arc_type is day_vols, then this is the path to the top
directory.
:type arc_type: str
:param arc_type: The type of archive, can be: seishub, FDSN, day_volumes
:type day: datetime.date
:param day: Date to retrieve data for
:type stachans: list
:param stachans: List of tuples of Stations and channels to try and get,
will not fail if stations are not available, but will warn.
:type length: float
:param length: Data length to extract in seconds, defaults to 1 day.
:returns: Stream of data
:rtype: obspy.core.stream.Stream
.. note:: A note on arc_types, if arc_type is day_vols, then this will \
look for directories labelled in the IRIS DMC conventions of \
Yyyyy/Rjjj.01/... where yyyy is the year and jjj is the julian day. \
Data within these file directories should be stored as day-long, \
single-channel files. This is not implemented in the fastest way \
possible to allow for a more general situation. If you require more \
speed you will need to re-write this.
.. rubric:: Example
>>> from obspy import UTCDateTime
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, missing data
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1'), ('GCSZ', 'HHZ')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, local day-volumes
>>> # Get the path to the test data
>>> import eqcorrscan
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('WHYM', 'SHZ'), ('EORO', 'SHZ')]
>>> st = read_data(TEST_PATH + '/day_vols', 'day_vols',
... t1, stachans)
>>> print(st)
2 Trace(s) in Stream:
AF.WHYM..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
AF.EORO..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
"""
st = []
available_stations = _check_available_data(archive, arc_type, day)
for station in stachans:
if len(station[1]) == 2:
# Cope with two char channel naming in seisan
station_map = (station[0], station[1][0] + '*' + station[1][1])
available_stations_map = [(sta[0], sta[1][0] + '*' + sta[1][-1])
for sta in available_stations]
else:
station_map = station
available_stations_map = available_stations
if station_map not in available_stations_map:
msg = ' '.join([station[0], station_map[1], 'is not available for',
day.strftime('%Y/%m/%d')])
warnings.warn(msg)
continue
if arc_type.lower() == 'seishub':
client = SeishubClient(archive)
st += client.get_waveforms(
network='*', station=station_map[0], location='*',
channel=station_map[1], starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + length)
elif arc_type.upper() == "FDSN":
client = FDSNClient(archive)
try:
st += client.get_waveforms(
network='*', station=station_map[0], location='*',
channel=station_map[1], starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + length)
except FDSNException:
warnings.warn('No data on server despite station being ' +
'available...')
continue
elif arc_type.lower() == 'day_vols':
wavfiles = _get_station_file(os.path.join(
archive, day.strftime('Y%Y' + os.sep + 'R%j.01')),
station_map[0], station_map[1])
for wavfile in wavfiles:
st += read(wavfile, starttime=day, endtime=day + length)
st = Stream(st)
return st | Function to read the appropriate data from an archive for a day.
:type archive: str
:param archive:
The archive source - if arc_type is seishub, this should be a url,
if the arc_type is FDSN then this can be either a url or a known obspy
client. If arc_type is day_vols, then this is the path to the top
directory.
:type arc_type: str
:param arc_type: The type of archive, can be: seishub, FDSN, day_volumes
:type day: datetime.date
:param day: Date to retrieve data for
:type stachans: list
:param stachans: List of tuples of Stations and channels to try and get,
will not fail if stations are not available, but will warn.
:type length: float
:param length: Data length to extract in seconds, defaults to 1 day.
:returns: Stream of data
:rtype: obspy.core.stream.Stream
.. note:: A note on arc_types, if arc_type is day_vols, then this will \
look for directories labelled in the IRIS DMC conventions of \
Yyyyy/Rjjj.01/... where yyyy is the year and jjj is the julian day. \
Data within these file directories should be stored as day-long, \
single-channel files. This is not implemented in the fastest way \
possible to allow for a more general situation. If you require more \
speed you will need to re-write this.
.. rubric:: Example
>>> from obspy import UTCDateTime
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, missing data
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1'), ('GCSZ', 'HHZ')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, local day-volumes
>>> # Get the path to the test data
>>> import eqcorrscan
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('WHYM', 'SHZ'), ('EORO', 'SHZ')]
>>> st = read_data(TEST_PATH + '/day_vols', 'day_vols',
... t1, stachans)
>>> print(st)
2 Trace(s) in Stream:
AF.WHYM..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
AF.EORO..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples | Below is the the instruction that describes the task:
### Input:
Function to read the appropriate data from an archive for a day.
:type archive: str
:param archive:
The archive source - if arc_type is seishub, this should be a url,
if the arc_type is FDSN then this can be either a url or a known obspy
client. If arc_type is day_vols, then this is the path to the top
directory.
:type arc_type: str
:param arc_type: The type of archive, can be: seishub, FDSN, day_volumes
:type day: datetime.date
:param day: Date to retrieve data for
:type stachans: list
:param stachans: List of tuples of Stations and channels to try and get,
will not fail if stations are not available, but will warn.
:type length: float
:param length: Data length to extract in seconds, defaults to 1 day.
:returns: Stream of data
:rtype: obspy.core.stream.Stream
.. note:: A note on arc_types, if arc_type is day_vols, then this will \
look for directories labelled in the IRIS DMC conventions of \
Yyyyy/Rjjj.01/... where yyyy is the year and jjj is the julian day. \
Data within these file directories should be stored as day-long, \
single-channel files. This is not implemented in the fastest way \
possible to allow for a more general situation. If you require more \
speed you will need to re-write this.
.. rubric:: Example
>>> from obspy import UTCDateTime
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, missing data
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1'), ('GCSZ', 'HHZ')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, local day-volumes
>>> # Get the path to the test data
>>> import eqcorrscan
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('WHYM', 'SHZ'), ('EORO', 'SHZ')]
>>> st = read_data(TEST_PATH + '/day_vols', 'day_vols',
... t1, stachans)
>>> print(st)
2 Trace(s) in Stream:
AF.WHYM..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
AF.EORO..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
### Response:
def read_data(archive, arc_type, day, stachans, length=86400):
"""
Function to read the appropriate data from an archive for a day.
:type archive: str
:param archive:
The archive source - if arc_type is seishub, this should be a url,
if the arc_type is FDSN then this can be either a url or a known obspy
client. If arc_type is day_vols, then this is the path to the top
directory.
:type arc_type: str
:param arc_type: The type of archive, can be: seishub, FDSN, day_volumes
:type day: datetime.date
:param day: Date to retrieve data for
:type stachans: list
:param stachans: List of tuples of Stations and channels to try and get,
will not fail if stations are not available, but will warn.
:type length: float
:param length: Data length to extract in seconds, defaults to 1 day.
:returns: Stream of data
:rtype: obspy.core.stream.Stream
.. note:: A note on arc_types, if arc_type is day_vols, then this will \
look for directories labelled in the IRIS DMC conventions of \
Yyyyy/Rjjj.01/... where yyyy is the year and jjj is the julian day. \
Data within these file directories should be stored as day-long, \
single-channel files. This is not implemented in the fastest way \
possible to allow for a more general situation. If you require more \
speed you will need to re-write this.
.. rubric:: Example
>>> from obspy import UTCDateTime
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, missing data
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1'), ('GCSZ', 'HHZ')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, local day-volumes
>>> # Get the path to the test data
>>> import eqcorrscan
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('WHYM', 'SHZ'), ('EORO', 'SHZ')]
>>> st = read_data(TEST_PATH + '/day_vols', 'day_vols',
... t1, stachans)
>>> print(st)
2 Trace(s) in Stream:
AF.WHYM..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
AF.EORO..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
"""
st = []
available_stations = _check_available_data(archive, arc_type, day)
for station in stachans:
if len(station[1]) == 2:
# Cope with two char channel naming in seisan
station_map = (station[0], station[1][0] + '*' + station[1][1])
available_stations_map = [(sta[0], sta[1][0] + '*' + sta[1][-1])
for sta in available_stations]
else:
station_map = station
available_stations_map = available_stations
if station_map not in available_stations_map:
msg = ' '.join([station[0], station_map[1], 'is not available for',
day.strftime('%Y/%m/%d')])
warnings.warn(msg)
continue
if arc_type.lower() == 'seishub':
client = SeishubClient(archive)
st += client.get_waveforms(
network='*', station=station_map[0], location='*',
channel=station_map[1], starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + length)
elif arc_type.upper() == "FDSN":
client = FDSNClient(archive)
try:
st += client.get_waveforms(
network='*', station=station_map[0], location='*',
channel=station_map[1], starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + length)
except FDSNException:
warnings.warn('No data on server despite station being ' +
'available...')
continue
elif arc_type.lower() == 'day_vols':
wavfiles = _get_station_file(os.path.join(
archive, day.strftime('Y%Y' + os.sep + 'R%j.01')),
station_map[0], station_map[1])
for wavfile in wavfiles:
st += read(wavfile, starttime=day, endtime=day + length)
st = Stream(st)
return st |
def append(self, tweet):
"""Add a tweet to the end of the list."""
c = self.connection.cursor()
last_tweet = c.execute("SELECT tweet from tweetlist where label='last_tweet'").next()[0]
c.execute("INSERT INTO tweets(message, previous_tweet, next_tweet) VALUES (?,?,NULL)", (tweet, last_tweet))
tweet_id = c.lastrowid
# Set the current tweet as the last tweet
c.execute("UPDATE tweetlist SET tweet=? WHERE label='last_tweet'", (tweet_id,))
# If there was no last_tweet, there was no first_tweet
# so make this the first tweet
if last_tweet is None:
c.execute("UPDATE tweetlist SET tweet=? WHERE label='first_tweet'", (tweet_id,))
else:
# Update the last tweets reference to this one
c.execute("UPDATE tweets SET next_tweet = ? WHERE id= ? ", (tweet_id, last_tweet))
self.connection.commit()
c.close() | Add a tweet to the end of the list. | Below is the the instruction that describes the task:
### Input:
Add a tweet to the end of the list.
### Response:
def append(self, tweet):
"""Add a tweet to the end of the list."""
c = self.connection.cursor()
last_tweet = c.execute("SELECT tweet from tweetlist where label='last_tweet'").next()[0]
c.execute("INSERT INTO tweets(message, previous_tweet, next_tweet) VALUES (?,?,NULL)", (tweet, last_tweet))
tweet_id = c.lastrowid
# Set the current tweet as the last tweet
c.execute("UPDATE tweetlist SET tweet=? WHERE label='last_tweet'", (tweet_id,))
# If there was no last_tweet, there was no first_tweet
# so make this the first tweet
if last_tweet is None:
c.execute("UPDATE tweetlist SET tweet=? WHERE label='first_tweet'", (tweet_id,))
else:
# Update the last tweets reference to this one
c.execute("UPDATE tweets SET next_tweet = ? WHERE id= ? ", (tweet_id, last_tweet))
self.connection.commit()
c.close() |
def verify(self, message, pubkey, rnum, snum):
"""
Verify the signature
for message m, pubkey Y, signature (r,s)
r = xcoord(R)
verify that : G*m+Y*r=R*s
this is true because: { Y=G*x, and R=G*k, s=(m+x*r)/k }
G*m+G*x*r = G*k*(m+x*r)/k ->
G*(m+x*r) = G*(m+x*r)
several ways to do the verification:
r == xcoord[ G*(m/s) + Y*(r/s) ] <<< the standard way
R * s == G*m + Y*r
r == xcoord[ (G*m + Y*r)/s) ]
"""
m = self.GFn.value(message)
r = self.GFn.value(rnum)
s = self.GFn.value(snum)
R = self.G * (m / s) + pubkey * (r / s)
# alternative methods of verifying
# RORG= self.ec.decompress(r, 0)
# RR = self.G * m + pubkey * r
# print "#1: %s .. %s" % (RR, RORG*s)
# print "#2: %s .. %s" % (RR*(1/s), r)
# print "#3: %s .. %s" % (R, r)
return R.x == r | Verify the signature
for message m, pubkey Y, signature (r,s)
r = xcoord(R)
verify that : G*m+Y*r=R*s
this is true because: { Y=G*x, and R=G*k, s=(m+x*r)/k }
G*m+G*x*r = G*k*(m+x*r)/k ->
G*(m+x*r) = G*(m+x*r)
several ways to do the verification:
r == xcoord[ G*(m/s) + Y*(r/s) ] <<< the standard way
R * s == G*m + Y*r
            r == xcoord[ (G*m + Y*r)/s) ] | Below is the instruction that describes the task:
### Input:
Verify the signature
for message m, pubkey Y, signature (r,s)
r = xcoord(R)
verify that : G*m+Y*r=R*s
this is true because: { Y=G*x, and R=G*k, s=(m+x*r)/k }
G*m+G*x*r = G*k*(m+x*r)/k ->
G*(m+x*r) = G*(m+x*r)
several ways to do the verification:
r == xcoord[ G*(m/s) + Y*(r/s) ] <<< the standard way
R * s == G*m + Y*r
r == xcoord[ (G*m + Y*r)/s) ]
### Response:
def verify(self, message, pubkey, rnum, snum):
"""
Verify the signature
for message m, pubkey Y, signature (r,s)
r = xcoord(R)
verify that : G*m+Y*r=R*s
this is true because: { Y=G*x, and R=G*k, s=(m+x*r)/k }
G*m+G*x*r = G*k*(m+x*r)/k ->
G*(m+x*r) = G*(m+x*r)
several ways to do the verification:
r == xcoord[ G*(m/s) + Y*(r/s) ] <<< the standard way
R * s == G*m + Y*r
r == xcoord[ (G*m + Y*r)/s) ]
"""
m = self.GFn.value(message)
r = self.GFn.value(rnum)
s = self.GFn.value(snum)
R = self.G * (m / s) + pubkey * (r / s)
# alternative methods of verifying
# RORG= self.ec.decompress(r, 0)
# RR = self.G * m + pubkey * r
# print "#1: %s .. %s" % (RR, RORG*s)
# print "#2: %s .. %s" % (RR*(1/s), r)
# print "#3: %s .. %s" % (R, r)
return R.x == r |
def bound_bboxes(bboxes):
"""
Finds the minimal bbox that contains all given bboxes
"""
group_x0 = min(map(lambda l: l[x0], bboxes))
group_y0 = min(map(lambda l: l[y0], bboxes))
group_x1 = max(map(lambda l: l[x1], bboxes))
group_y1 = max(map(lambda l: l[y1], bboxes))
    return (group_x0, group_y0, group_x1, group_y1) | Finds the minimal bbox that contains all given bboxes | Below is the instruction that describes the task:
### Input:
Finds the minimal bbox that contains all given bboxes
### Response:
def bound_bboxes(bboxes):
"""
Finds the minimal bbox that contains all given bboxes
"""
group_x0 = min(map(lambda l: l[x0], bboxes))
group_y0 = min(map(lambda l: l[y0], bboxes))
group_x1 = max(map(lambda l: l[x1], bboxes))
group_y1 = max(map(lambda l: l[y1], bboxes))
return (group_x0, group_y0, group_x1, group_y1) |
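A minimal usage sketch for bound_bboxes, assuming the module-level index constants x0, y0, x1, y1 it reads (not shown in this row) are simply 0, 1, 2, 3:

boxes = [(0, 0, 4, 3), (2, 1, 6, 5)]   # made-up (x0, y0, x1, y1) boxes
print(bound_bboxes(boxes))             # -> (0, 0, 6, 5)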
def SegmentMax(a, ids):
"""
Segmented max op.
"""
func = lambda idxs: np.amax(a[idxs], axis=0)
    return seg_map(func, a, ids), | Segmented max op. | Below is the instruction that describes the task:
### Input:
Segmented max op.
### Response:
def SegmentMax(a, ids):
"""
Segmented max op.
"""
func = lambda idxs: np.amax(a[idxs], axis=0)
return seg_map(func, a, ids), |
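SegmentMax delegates to a seg_map helper that is not part of this row; a self-contained sketch of the intended semantics (maximum over the rows sharing a segment id), with made-up inputs:

import numpy as np

a = np.array([1.0, 5.0, 2.0, 7.0, 3.0])
ids = np.array([0, 0, 1, 1, 1])
# take the max of the rows belonging to each segment id
seg_max = np.array([a[ids == i].max() for i in np.unique(ids)])
print(seg_max)  # [5. 7.]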
def load_data(self, grid_method="gamma", num_samples=1000, condition_threshold=0.5, zero_inflate=False,
percentile=None):
"""
Reads the track forecasts and converts them to grid point values based on random sampling.
Args:
grid_method: "gamma" by default
num_samples: Number of samples drawn from predicted pdf
condition_threshold: Objects are not written to the grid if condition model probability is below this
threshold.
zero_inflate: Whether to sample zeros from a Bernoulli sampler based on the condition model probability
percentile: If None, outputs the mean of the samples at each grid point, otherwise outputs the specified
percentile from 0 to 100.
Returns:
0 if tracks are successfully sampled on to grid. If no tracks are found, returns -1.
"""
self.percentile = percentile
if self.track_forecasts == {}:
self.load_track_forecasts()
if self.track_forecasts == {}:
return -1
if self.data is None:
self.data = np.zeros((len(self.members), self.times.size, self.grid_shape[0], self.grid_shape[1]),
dtype=np.float32)
else:
self.data[:] = 0
if grid_method in ["mean", "median", "samples"]:
for m, member in enumerate(self.members):
print("Sampling " + member)
for track_forecast in self.track_forecasts[member]:
times = track_forecast["properties"]["times"]
for s, step in enumerate(track_forecast["features"]):
forecast_pdf = np.array(step['properties'][self.variable + "_" +
self.ensemble_name.replace(" ", "-")])
forecast_time = self.run_date + timedelta(hours=times[s])
t = np.where(self.times == forecast_time)[0][0]
mask = np.array(step['properties']["masks"], dtype=int)
i = np.array(step['properties']["i"], dtype=int)
i = i[mask == 1]
j = np.array(step['properties']["j"], dtype=int)
j = j[mask == 1]
if grid_method == "samples":
intensities = np.array(step["properties"]["timesteps"], dtype=float)[mask == 1]
rankings = np.argsort(intensities)
samples = np.random.choice(self.forecast_bins, size=intensities.size, replace=True,
p=forecast_pdf)
self.data[m, t, i[rankings], j[rankings]] = samples
else:
if grid_method == "mean":
forecast_value = np.sum(forecast_pdf * self.forecast_bins)
elif grid_method == "median":
forecast_cdf = np.cumsum(forecast_pdf)
forecast_value = self.forecast_bins[np.argmin(np.abs(forecast_cdf - 0.5))]
else:
forecast_value = 0
self.data[m, t, i, j] = forecast_value
if grid_method in ["gamma"]:
full_condition_name = "condition_" + self.condition_model_name.replace(" ", "-")
dist_model_name = self.variable + "_" + self.ensemble_name.replace(" ", "-")
for m, member in enumerate(self.members):
for track_forecast in self.track_forecasts[member]:
times = track_forecast["properties"]["times"]
for s, step in enumerate(track_forecast["features"]):
forecast_params = step["properties"][dist_model_name]
if self.condition_model_name is not None:
condition = step["properties"][full_condition_name]
else:
condition = None
forecast_time = self.run_date + timedelta(hours=times[s])
if forecast_time in self.times:
t = np.where(self.times == forecast_time)[0][0]
mask = np.array(step["properties"]["masks"], dtype=int)
rankings = np.argsort(step["properties"]["timesteps"])[mask == 1]
i = np.array(step["properties"]["i"], dtype=int)[mask == 1][rankings]
j = np.array(step["properties"]["j"], dtype=int)[mask == 1][rankings]
if rankings.size > 0:
raw_samples = np.sort(gamma.rvs(forecast_params[0], loc=forecast_params[1],
scale=forecast_params[2],
size=(num_samples, rankings.size)),
axis=1)
if zero_inflate:
raw_samples *= bernoulli.rvs(condition,
size=(num_samples, rankings.size))
if percentile is None:
samples = raw_samples.mean(axis=0)
else:
samples = np.percentile(raw_samples, percentile, axis=0)
if condition is None or condition >= condition_threshold:
self.data[m, t, i, j] = samples
return 0 | Reads the track forecasts and converts them to grid point values based on random sampling.
Args:
grid_method: "gamma" by default
num_samples: Number of samples drawn from predicted pdf
condition_threshold: Objects are not written to the grid if condition model probability is below this
threshold.
zero_inflate: Whether to sample zeros from a Bernoulli sampler based on the condition model probability
percentile: If None, outputs the mean of the samples at each grid point, otherwise outputs the specified
percentile from 0 to 100.
Returns:
            0 if tracks are successfully sampled on to grid. If no tracks are found, returns -1. | Below is the instruction that describes the task:
### Input:
Reads the track forecasts and converts them to grid point values based on random sampling.
Args:
grid_method: "gamma" by default
num_samples: Number of samples drawn from predicted pdf
condition_threshold: Objects are not written to the grid if condition model probability is below this
threshold.
zero_inflate: Whether to sample zeros from a Bernoulli sampler based on the condition model probability
percentile: If None, outputs the mean of the samples at each grid point, otherwise outputs the specified
percentile from 0 to 100.
Returns:
0 if tracks are successfully sampled on to grid. If no tracks are found, returns -1.
### Response:
def load_data(self, grid_method="gamma", num_samples=1000, condition_threshold=0.5, zero_inflate=False,
percentile=None):
"""
Reads the track forecasts and converts them to grid point values based on random sampling.
Args:
grid_method: "gamma" by default
num_samples: Number of samples drawn from predicted pdf
condition_threshold: Objects are not written to the grid if condition model probability is below this
threshold.
zero_inflate: Whether to sample zeros from a Bernoulli sampler based on the condition model probability
percentile: If None, outputs the mean of the samples at each grid point, otherwise outputs the specified
percentile from 0 to 100.
Returns:
0 if tracks are successfully sampled on to grid. If no tracks are found, returns -1.
"""
self.percentile = percentile
if self.track_forecasts == {}:
self.load_track_forecasts()
if self.track_forecasts == {}:
return -1
if self.data is None:
self.data = np.zeros((len(self.members), self.times.size, self.grid_shape[0], self.grid_shape[1]),
dtype=np.float32)
else:
self.data[:] = 0
if grid_method in ["mean", "median", "samples"]:
for m, member in enumerate(self.members):
print("Sampling " + member)
for track_forecast in self.track_forecasts[member]:
times = track_forecast["properties"]["times"]
for s, step in enumerate(track_forecast["features"]):
forecast_pdf = np.array(step['properties'][self.variable + "_" +
self.ensemble_name.replace(" ", "-")])
forecast_time = self.run_date + timedelta(hours=times[s])
t = np.where(self.times == forecast_time)[0][0]
mask = np.array(step['properties']["masks"], dtype=int)
i = np.array(step['properties']["i"], dtype=int)
i = i[mask == 1]
j = np.array(step['properties']["j"], dtype=int)
j = j[mask == 1]
if grid_method == "samples":
intensities = np.array(step["properties"]["timesteps"], dtype=float)[mask == 1]
rankings = np.argsort(intensities)
samples = np.random.choice(self.forecast_bins, size=intensities.size, replace=True,
p=forecast_pdf)
self.data[m, t, i[rankings], j[rankings]] = samples
else:
if grid_method == "mean":
forecast_value = np.sum(forecast_pdf * self.forecast_bins)
elif grid_method == "median":
forecast_cdf = np.cumsum(forecast_pdf)
forecast_value = self.forecast_bins[np.argmin(np.abs(forecast_cdf - 0.5))]
else:
forecast_value = 0
self.data[m, t, i, j] = forecast_value
if grid_method in ["gamma"]:
full_condition_name = "condition_" + self.condition_model_name.replace(" ", "-")
dist_model_name = self.variable + "_" + self.ensemble_name.replace(" ", "-")
for m, member in enumerate(self.members):
for track_forecast in self.track_forecasts[member]:
times = track_forecast["properties"]["times"]
for s, step in enumerate(track_forecast["features"]):
forecast_params = step["properties"][dist_model_name]
if self.condition_model_name is not None:
condition = step["properties"][full_condition_name]
else:
condition = None
forecast_time = self.run_date + timedelta(hours=times[s])
if forecast_time in self.times:
t = np.where(self.times == forecast_time)[0][0]
mask = np.array(step["properties"]["masks"], dtype=int)
rankings = np.argsort(step["properties"]["timesteps"])[mask == 1]
i = np.array(step["properties"]["i"], dtype=int)[mask == 1][rankings]
j = np.array(step["properties"]["j"], dtype=int)[mask == 1][rankings]
if rankings.size > 0:
raw_samples = np.sort(gamma.rvs(forecast_params[0], loc=forecast_params[1],
scale=forecast_params[2],
size=(num_samples, rankings.size)),
axis=1)
if zero_inflate:
raw_samples *= bernoulli.rvs(condition,
size=(num_samples, rankings.size))
if percentile is None:
samples = raw_samples.mean(axis=0)
else:
samples = np.percentile(raw_samples, percentile, axis=0)
if condition is None or condition >= condition_threshold:
self.data[m, t, i, j] = samples
return 0 |
def hex_2_rgb(self, color):
"""
convert a hex color to rgb
"""
if not self.RE_HEX.match(color):
color = "#FFF"
if len(color) == 7:
return (int(color[i : i + 2], 16) / 255 for i in [1, 3, 5])
        return (int(c, 16) / 15 for c in color) | convert a hex color to rgb | Below is the instruction that describes the task:
### Input:
convert a hex color to rgb
### Response:
def hex_2_rgb(self, color):
"""
convert a hex color to rgb
"""
if not self.RE_HEX.match(color):
color = "#FFF"
if len(color) == 7:
return (int(color[i : i + 2], 16) / 255 for i in [1, 3, 5])
return (int(c, 16) / 15 for c in color) |
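hex_2_rgb leans on a self.RE_HEX pattern that is not included in this row; a standalone sketch of the six-digit branch, with an assumed regex:

import re

RE_HEX = re.compile(r"^#([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$")  # assumed pattern

color = "#FF8000"
if RE_HEX.match(color) and len(color) == 7:
    r, g, b = (int(color[i:i + 2], 16) / 255 for i in [1, 3, 5])
    print(r, g, b)  # 1.0 0.5019607843137255 0.0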
def apply_activation(
books,
x,
activation,
activation_args=(),
activation_kwargs=None):
"""Returns activation(x, *activation_args, **activation_kwargs).
This applies the given activation and adds useful summaries specific to the
activation.
Args:
books: The bookkeeper.
x: The tensor to apply activation to.
activation: An activation function.
activation_args: Optional additional arguments for the activation.
activation_kwargs: Optional keyword args for activation.
Returns:
A tensor with activation applied to x.
"""
if activation is None:
return x
if activation_kwargs is None:
activation_kwargs = {}
y = activation(x, *activation_args, **activation_kwargs)
if activation in (tf.nn.relu, functions.leaky_relu, functions.softplus):
books.add_scalar_summary(
tf.reduce_mean(tf.cast(tf.less(x, 0.0), tf.float32)),
'%s/zeros' % y.op.name)
elif activation is tf.nn.relu6:
books.add_scalar_summary(
tf.reduce_mean(tf.cast(tf.less(x, 0.0), tf.float32)),
'%s/zeros' % y.op.name)
books.add_scalar_summary(
tf.reduce_mean(tf.cast(tf.greater(x, 6.0), tf.float32)),
'%s/sixes' % y.op.name)
elif activation in (functions.l2_normalize, tf.nn.l2_normalize,
functions.l1_normalize):
books.add_scalar_summary(
tf.reduce_mean(tf.sqrt(tf.reduce_sum(
tf.square(x), 1))), '%s/length' % y.op.name)
return y | Returns activation(x, *activation_args, **activation_kwargs).
This applies the given activation and adds useful summaries specific to the
activation.
Args:
books: The bookkeeper.
x: The tensor to apply activation to.
activation: An activation function.
activation_args: Optional additional arguments for the activation.
activation_kwargs: Optional keyword args for activation.
Returns:
    A tensor with activation applied to x. | Below is the instruction that describes the task:
### Input:
Returns activation(x, *activation_args, **activation_kwargs).
This applies the given activation and adds useful summaries specific to the
activation.
Args:
books: The bookkeeper.
x: The tensor to apply activation to.
activation: An activation function.
activation_args: Optional additional arguments for the activation.
activation_kwargs: Optional keyword args for activation.
Returns:
A tensor with activation applied to x.
### Response:
def apply_activation(
books,
x,
activation,
activation_args=(),
activation_kwargs=None):
"""Returns activation(x, *activation_args, **activation_kwargs).
This applies the given activation and adds useful summaries specific to the
activation.
Args:
books: The bookkeeper.
x: The tensor to apply activation to.
activation: An activation function.
activation_args: Optional additional arguments for the activation.
activation_kwargs: Optional keyword args for activation.
Returns:
A tensor with activation applied to x.
"""
if activation is None:
return x
if activation_kwargs is None:
activation_kwargs = {}
y = activation(x, *activation_args, **activation_kwargs)
if activation in (tf.nn.relu, functions.leaky_relu, functions.softplus):
books.add_scalar_summary(
tf.reduce_mean(tf.cast(tf.less(x, 0.0), tf.float32)),
'%s/zeros' % y.op.name)
elif activation is tf.nn.relu6:
books.add_scalar_summary(
tf.reduce_mean(tf.cast(tf.less(x, 0.0), tf.float32)),
'%s/zeros' % y.op.name)
books.add_scalar_summary(
tf.reduce_mean(tf.cast(tf.greater(x, 6.0), tf.float32)),
'%s/sixes' % y.op.name)
elif activation in (functions.l2_normalize, tf.nn.l2_normalize,
functions.l1_normalize):
books.add_scalar_summary(
tf.reduce_mean(tf.sqrt(tf.reduce_sum(
tf.square(x), 1))), '%s/length' % y.op.name)
return y |
def create_array(self, json):
"""Create :class:`.resources.Array` from JSON.
:param json: JSON dict.
:return: Array instance.
"""
result = Array(json['sys'])
result.total = json['total']
result.skip = json['skip']
result.limit = json['limit']
result.items = []
result.items_mapped = {'Asset': {}, 'Entry': {}}
self.process_array_items(result, json)
self.process_array_includes(result, json)
return result | Create :class:`.resources.Array` from JSON.
:param json: JSON dict.
        :return: Array instance. | Below is the instruction that describes the task:
### Input:
Create :class:`.resources.Array` from JSON.
:param json: JSON dict.
:return: Array instance.
### Response:
def create_array(self, json):
"""Create :class:`.resources.Array` from JSON.
:param json: JSON dict.
:return: Array instance.
"""
result = Array(json['sys'])
result.total = json['total']
result.skip = json['skip']
result.limit = json['limit']
result.items = []
result.items_mapped = {'Asset': {}, 'Entry': {}}
self.process_array_items(result, json)
self.process_array_includes(result, json)
return result |
def _get_erase_command(self, drive, pattern):
"""Return the command arguments based on the pattern.
Erase command examples:
1) Sanitize: "ssacli ctrl slot=0 pd 1I:1:1 modify erase
erasepattern=overwrite unrestricted=off forced"
2) Zeros: "ssacli ctrl slot=0 pd 1I:1:1 modify erase
erasepattern=zero forced"
:param drive: A string with comma separated list of drives.
:param pattern: A string which defines the type of erase.
:returns: A list of ssacli command arguments.
"""
cmd_args = []
cmd_args.append("pd %s" % drive)
cmd_args.extend(['modify', 'erase', pattern])
if pattern != 'erasepattern=zero':
cmd_args.append('unrestricted=off')
cmd_args.append('forced')
return cmd_args | Return the command arguments based on the pattern.
Erase command examples:
1) Sanitize: "ssacli ctrl slot=0 pd 1I:1:1 modify erase
erasepattern=overwrite unrestricted=off forced"
2) Zeros: "ssacli ctrl slot=0 pd 1I:1:1 modify erase
erasepattern=zero forced"
:param drive: A string with comma separated list of drives.
:param pattern: A string which defines the type of erase.
        :returns: A list of ssacli command arguments. | Below is the instruction that describes the task:
### Input:
Return the command arguments based on the pattern.
Erase command examples:
1) Sanitize: "ssacli ctrl slot=0 pd 1I:1:1 modify erase
erasepattern=overwrite unrestricted=off forced"
2) Zeros: "ssacli ctrl slot=0 pd 1I:1:1 modify erase
erasepattern=zero forced"
:param drive: A string with comma separated list of drives.
:param pattern: A string which defines the type of erase.
:returns: A list of ssacli command arguments.
### Response:
def _get_erase_command(self, drive, pattern):
"""Return the command arguments based on the pattern.
Erase command examples:
1) Sanitize: "ssacli ctrl slot=0 pd 1I:1:1 modify erase
erasepattern=overwrite unrestricted=off forced"
2) Zeros: "ssacli ctrl slot=0 pd 1I:1:1 modify erase
erasepattern=zero forced"
:param drive: A string with comma separated list of drives.
:param pattern: A string which defines the type of erase.
:returns: A list of ssacli command arguments.
"""
cmd_args = []
cmd_args.append("pd %s" % drive)
cmd_args.extend(['modify', 'erase', pattern])
if pattern != 'erasepattern=zero':
cmd_args.append('unrestricted=off')
cmd_args.append('forced')
return cmd_args |
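A quick sanity check of the two documented patterns, calling the method on a hypothetical manager instance `mgr` of the class that defines it:

print(mgr._get_erase_command('1I:1:1', 'erasepattern=overwrite'))
# -> ['pd 1I:1:1', 'modify', 'erase', 'erasepattern=overwrite', 'unrestricted=off', 'forced']
print(mgr._get_erase_command('1I:1:1', 'erasepattern=zero'))
# -> ['pd 1I:1:1', 'modify', 'erase', 'erasepattern=zero', 'forced']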
def _GetIdentifierFromPath(self, parser_mediator):
"""Extracts a container or a graph ID from a JSON file's path.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
Returns:
str: container or graph identifier.
"""
file_entry = parser_mediator.GetFileEntry()
path = file_entry.path_spec.location
file_system = file_entry.GetFileSystem()
path_segments = file_system.SplitPath(path)
return path_segments[-2] | Extracts a container or a graph ID from a JSON file's path.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
Returns:
      str: container or graph identifier. | Below is the instruction that describes the task:
### Input:
Extracts a container or a graph ID from a JSON file's path.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
Returns:
str: container or graph identifier.
### Response:
def _GetIdentifierFromPath(self, parser_mediator):
"""Extracts a container or a graph ID from a JSON file's path.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
Returns:
str: container or graph identifier.
"""
file_entry = parser_mediator.GetFileEntry()
path = file_entry.path_spec.location
file_system = file_entry.GetFileSystem()
path_segments = file_system.SplitPath(path)
return path_segments[-2] |
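Stripped of the dfvfs plumbing, the identifier is simply the second-to-last path segment; a plain-Python sketch with a placeholder path:

path = "/var/lib/docker/containers/<container-id>/config.json"  # placeholder
segments = [s for s in path.split("/") if s]
print(segments[-2])  # <container-id>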
def exists_locked(filepath: str) -> Tuple[bool, bool]:
"""
Checks if a file is locked by opening it in append mode.
(If no exception is thrown in that situation, then the file is not locked.)
Args:
filepath: file to check
Returns:
tuple: ``(exists, locked)``
See https://www.calazan.com/how-to-check-if-a-file-is-locked-in-python/.
"""
exists = False
locked = None
file_object = None
if os.path.exists(filepath):
exists = True
locked = True
try:
buffer_size = 8
# Opening file in append mode and read the first 8 characters.
file_object = open(filepath, 'a', buffer_size)
if file_object:
locked = False # exists and not locked
except IOError:
pass
finally:
if file_object:
file_object.close()
return exists, locked | Checks if a file is locked by opening it in append mode.
(If no exception is thrown in that situation, then the file is not locked.)
Args:
filepath: file to check
Returns:
tuple: ``(exists, locked)``
    See https://www.calazan.com/how-to-check-if-a-file-is-locked-in-python/. | Below is the instruction that describes the task:
### Input:
Checks if a file is locked by opening it in append mode.
(If no exception is thrown in that situation, then the file is not locked.)
Args:
filepath: file to check
Returns:
tuple: ``(exists, locked)``
See https://www.calazan.com/how-to-check-if-a-file-is-locked-in-python/.
### Response:
def exists_locked(filepath: str) -> Tuple[bool, bool]:
"""
Checks if a file is locked by opening it in append mode.
(If no exception is thrown in that situation, then the file is not locked.)
Args:
filepath: file to check
Returns:
tuple: ``(exists, locked)``
See https://www.calazan.com/how-to-check-if-a-file-is-locked-in-python/.
"""
exists = False
locked = None
file_object = None
if os.path.exists(filepath):
exists = True
locked = True
try:
buffer_size = 8
# Opening file in append mode and read the first 8 characters.
file_object = open(filepath, 'a', buffer_size)
if file_object:
locked = False # exists and not locked
except IOError:
pass
finally:
if file_object:
file_object.close()
return exists, locked |
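A usage sketch, assuming exists_locked() above is in scope; the results shown are what an ordinary, unlocked file gives on most platforms:

import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello")
print(exists_locked(tmp.name))         # (True, False): exists, not locked
print(exists_locked(tmp.name + ".x"))  # (False, None): file does not exist
os.remove(tmp.name)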
def __calculate_dataset_difference(self, amount_clusters):
"""!
@brief Calculate distance from each point to each cluster center.
"""
dataset_differences = numpy.zeros((amount_clusters, len(self.__pointer_data)))
for index_center in range(amount_clusters):
if self.__metric.get_type() != type_metric.USER_DEFINED:
dataset_differences[index_center] = self.__metric(self.__pointer_data, self.__centers[index_center])
else:
dataset_differences[index_center] = [ self.__metric(point, self.__centers[index_center])
for point in self.__pointer_data ]
return dataset_differences | !
        @brief Calculate distance from each point to each cluster center. | Below is the instruction that describes the task:
### Input:
!
@brief Calculate distance from each point to each cluster center.
### Response:
def __calculate_dataset_difference(self, amount_clusters):
"""!
@brief Calculate distance from each point to each cluster center.
"""
dataset_differences = numpy.zeros((amount_clusters, len(self.__pointer_data)))
for index_center in range(amount_clusters):
if self.__metric.get_type() != type_metric.USER_DEFINED:
dataset_differences[index_center] = self.__metric(self.__pointer_data, self.__centers[index_center])
else:
dataset_differences[index_center] = [ self.__metric(point, self.__centers[index_center])
for point in self.__pointer_data ]
return dataset_differences |
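A standalone NumPy sketch of the matrix this method builds, one row per cluster center and one column per data point, assuming a Euclidean metric:

import numpy as np

points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 2.0]])
centers = np.array([[0.0, 0.0], [1.0, 1.0]])
differences = np.linalg.norm(points[None, :, :] - centers[:, None, :], axis=2)
print(differences.shape)  # (2, 3): (amount_clusters, number_of_points)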
def analysis_provenance_details_simplified_extractor(
impact_report, component_metadata):
"""Extracting simplified version of provenance details of layers.
This extractor will produce provenance details which will be displayed in
the main report.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
context = {}
extra_args = component_metadata.extra_args
default_source = resolve_from_dictionary(
extra_args, ['defaults', 'source'])
default_reference = resolve_from_dictionary(
extra_args, ['defaults', 'reference'])
provenance_format_args = resolve_from_dictionary(
extra_args, 'provenance_format')
hazard_keywords = impact_report.impact_function.provenance[
'hazard_keywords']
header = resolve_from_dictionary(
provenance_format_args, 'hazard_header')
provenance_format = resolve_from_dictionary(
provenance_format_args, 'hazard_format')
hazard_provenance = {
'header': header,
'provenance': provenance_format.format(
layer_name=hazard_keywords.get('title'),
source=QgsDataSourceUri.removePassword(
decode_full_layer_uri(hazard_keywords.get('source'))[0]
or default_source))
}
exposure_keywords = impact_report.impact_function.provenance[
'exposure_keywords']
header = resolve_from_dictionary(
provenance_format_args, 'exposure_header')
provenance_format = resolve_from_dictionary(
provenance_format_args, 'exposure_format')
exposure_provenance = {
'header': header,
'provenance': provenance_format.format(
layer_name=exposure_keywords.get('title'),
source=QgsDataSourceUri.removePassword(
decode_full_layer_uri(exposure_keywords.get('source'))[0]
or default_source))
}
aggregation_keywords = impact_report.impact_function.provenance[
'aggregation_keywords']
header = resolve_from_dictionary(
provenance_format_args, 'aggregation_header')
provenance_format = resolve_from_dictionary(
provenance_format_args, 'aggregation_format')
# only if aggregation layer used
if aggregation_keywords:
provenance_string = provenance_format.format(
layer_name=aggregation_keywords.get('title'),
source=QgsDataSourceUri.removePassword(
decode_full_layer_uri(aggregation_keywords.get('source'))[0]
or default_source))
else:
aggregation_not_used = resolve_from_dictionary(
extra_args, ['defaults', 'aggregation_not_used'])
provenance_string = aggregation_not_used
aggregation_provenance = {
'header': header,
'provenance': provenance_string
}
impact_function_name = impact_report.impact_function.name
header = resolve_from_dictionary(
provenance_format_args, 'impact_function_header')
provenance_format = resolve_from_dictionary(
provenance_format_args, 'impact_function_format')
impact_function_provenance = {
'header': header,
'provenance': provenance_format.format(
impact_function_name=impact_function_name,
reference=default_reference)
}
provenance_detail = OrderedDict()
provenance_detail['hazard'] = hazard_provenance
provenance_detail['exposure'] = exposure_provenance
provenance_detail['aggregation'] = aggregation_provenance
provenance_detail['impact_function'] = impact_function_provenance
analysis_details_header = resolve_from_dictionary(
extra_args, ['header', 'analysis_detail'])
context['component_key'] = component_metadata.key
context.update({
'header': analysis_details_header,
'details': provenance_detail
})
return context | Extracting simplified version of provenance details of layers.
This extractor will produce provenance details which will be displayed in
the main report.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
    .. versionadded:: 4.0 | Below is the instruction that describes the task:
### Input:
Extracting simplified version of provenance details of layers.
This extractor will produce provenance details which will be displayed in
the main report.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
### Response:
def analysis_provenance_details_simplified_extractor(
impact_report, component_metadata):
"""Extracting simplified version of provenance details of layers.
This extractor will produce provenance details which will be displayed in
the main report.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
context = {}
extra_args = component_metadata.extra_args
default_source = resolve_from_dictionary(
extra_args, ['defaults', 'source'])
default_reference = resolve_from_dictionary(
extra_args, ['defaults', 'reference'])
provenance_format_args = resolve_from_dictionary(
extra_args, 'provenance_format')
hazard_keywords = impact_report.impact_function.provenance[
'hazard_keywords']
header = resolve_from_dictionary(
provenance_format_args, 'hazard_header')
provenance_format = resolve_from_dictionary(
provenance_format_args, 'hazard_format')
hazard_provenance = {
'header': header,
'provenance': provenance_format.format(
layer_name=hazard_keywords.get('title'),
source=QgsDataSourceUri.removePassword(
decode_full_layer_uri(hazard_keywords.get('source'))[0]
or default_source))
}
exposure_keywords = impact_report.impact_function.provenance[
'exposure_keywords']
header = resolve_from_dictionary(
provenance_format_args, 'exposure_header')
provenance_format = resolve_from_dictionary(
provenance_format_args, 'exposure_format')
exposure_provenance = {
'header': header,
'provenance': provenance_format.format(
layer_name=exposure_keywords.get('title'),
source=QgsDataSourceUri.removePassword(
decode_full_layer_uri(exposure_keywords.get('source'))[0]
or default_source))
}
aggregation_keywords = impact_report.impact_function.provenance[
'aggregation_keywords']
header = resolve_from_dictionary(
provenance_format_args, 'aggregation_header')
provenance_format = resolve_from_dictionary(
provenance_format_args, 'aggregation_format')
# only if aggregation layer used
if aggregation_keywords:
provenance_string = provenance_format.format(
layer_name=aggregation_keywords.get('title'),
source=QgsDataSourceUri.removePassword(
decode_full_layer_uri(aggregation_keywords.get('source'))[0]
or default_source))
else:
aggregation_not_used = resolve_from_dictionary(
extra_args, ['defaults', 'aggregation_not_used'])
provenance_string = aggregation_not_used
aggregation_provenance = {
'header': header,
'provenance': provenance_string
}
impact_function_name = impact_report.impact_function.name
header = resolve_from_dictionary(
provenance_format_args, 'impact_function_header')
provenance_format = resolve_from_dictionary(
provenance_format_args, 'impact_function_format')
impact_function_provenance = {
'header': header,
'provenance': provenance_format.format(
impact_function_name=impact_function_name,
reference=default_reference)
}
provenance_detail = OrderedDict()
provenance_detail['hazard'] = hazard_provenance
provenance_detail['exposure'] = exposure_provenance
provenance_detail['aggregation'] = aggregation_provenance
provenance_detail['impact_function'] = impact_function_provenance
analysis_details_header = resolve_from_dictionary(
extra_args, ['header', 'analysis_detail'])
context['component_key'] = component_metadata.key
context.update({
'header': analysis_details_header,
'details': provenance_detail
})
return context |
def decode_response(content: bytes) -> set:
""" adb response text -> device set """
content = content[4:].decode(config.ENCODING)
if '\t' not in content and '\n' not in content:
return set()
connected_devices = set()
device_list = [i for i in content.split('\n') if i]
for each_device in device_list:
device_id, device_status = each_device.split('\t')
if device_status == 'device':
connected_devices.add(device_id)
    return connected_devices | adb response text -> device set | Below is the instruction that describes the task:
### Input:
adb response text -> device set
### Response:
def decode_response(content: bytes) -> set:
""" adb response text -> device set """
content = content[4:].decode(config.ENCODING)
if '\t' not in content and '\n' not in content:
return set()
connected_devices = set()
device_list = [i for i in content.split('\n') if i]
for each_device in device_list:
device_id, device_status = each_device.split('\t')
if device_status == 'device':
connected_devices.add(device_id)
return connected_devices |
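A usage sketch, assuming decode_response() above is importable and config.ENCODING is 'utf-8'; the payload mimics an adb host reply whose first four bytes are a hex length prefix that the function simply skips:

raw = b"002bemulator-5554\tdevice\nemulator-5556\toffline\n"
print(decode_response(raw))  # {'emulator-5554'}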
def get_files(client, bucket, prefix=''):
"""Lists files/objects on a bucket.
TODO: docstring"""
bucket = client.get_bucket(bucket)
files = list(bucket.list_blobs(prefix=prefix))
return files | Lists files/objects on a bucket.
    TODO: docstring | Below is the instruction that describes the task:
### Input:
Lists files/objects on a bucket.
TODO: docstring
### Response:
def get_files(client, bucket, prefix=''):
"""Lists files/objects on a bucket.
TODO: docstring"""
bucket = client.get_bucket(bucket)
files = list(bucket.list_blobs(prefix=prefix))
return files |
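A usage sketch with the google-cloud-storage client; the bucket name and prefix below are placeholders, and credentials are assumed to be configured:

from google.cloud import storage

client = storage.Client()
blobs = get_files(client, "my-bucket", prefix="exports/2020/")
for blob in blobs:
    print(blob.name, blob.size)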
def build_config(config_file=get_system_config_directory()):
"""
Construct the config object from necessary elements.
"""
config = Config(config_file, allow_no_value=True)
application_versions = find_applications_on_system()
# Add found versions to config if they don't exist. Versions found
    # in the config file take precedence over versions found in PATH.
for item in application_versions.iteritems():
if not config.has_option(Config.EXECUTABLES, item[0]):
config.set(Config.EXECUTABLES, item[0], item[1])
    return config | Construct the config object from necessary elements. | Below is the instruction that describes the task:
### Input:
Construct the config object from necessary elements.
### Response:
def build_config(config_file=get_system_config_directory()):
"""
Construct the config object from necessary elements.
"""
config = Config(config_file, allow_no_value=True)
application_versions = find_applications_on_system()
# Add found versions to config if they don't exist. Versions found
    # in the config file take precedence over versions found in PATH.
for item in application_versions.iteritems():
if not config.has_option(Config.EXECUTABLES, item[0]):
config.set(Config.EXECUTABLES, item[0], item[1])
return config |
def commit(self, offset=None, limit=None, dryrun=False):
""" Start the rsync download """
self.stream.command = "rsync -avRK --files-from={path} {source} {destination}"
self.stream.append_tasks_to_streamlets(offset=offset, limit=limit)
self.stream.commit_streamlets()
self.stream.run_streamlets()
        self.stream.reset_streamlet() | Start the rsync download | Below is the instruction that describes the task:
### Input:
Start the rsync download
### Response:
def commit(self, offset=None, limit=None, dryrun=False):
""" Start the rsync download """
self.stream.command = "rsync -avRK --files-from={path} {source} {destination}"
self.stream.append_tasks_to_streamlets(offset=offset, limit=limit)
self.stream.commit_streamlets()
self.stream.run_streamlets()
self.stream.reset_streamlet() |
def _record_sort_by_indicators(record):
"""Sort the fields inside the record by indicators."""
for tag, fields in record.items():
        record[tag] = _fields_sort_by_indicators(fields) | Sort the fields inside the record by indicators. | Below is the instruction that describes the task:
### Input:
Sort the fields inside the record by indicators.
### Response:
def _record_sort_by_indicators(record):
"""Sort the fields inside the record by indicators."""
for tag, fields in record.items():
record[tag] = _fields_sort_by_indicators(fields) |
def main():
"Process CLI arguments and call appropriate functions."
try:
args = docopt.docopt(__doc__, version=__about__.__version__)
except docopt.DocoptExit:
if len(sys.argv) > 1:
print(f"{Fore.RED}Invalid command syntax, "
f"check help:{Fore.RESET}\n")
print(__doc__)
sys.exit(1)
print_all = False
if not (args["--int-width"] or args["--int-height"] or args["--decimal"]):
print_all = True
width = float(args["WIDTH"])
height = float(args["HEIGHT"])
as_int_ = as_int(width, height)
as_float_ = as_float(width, height)
if args["--ndigits"]:
as_float_ = round(as_float_, int(args["--ndigits"]))
to_print = []
if args["--int-width"] or print_all:
to_print.append(f"{Fore.BLUE}{as_int_[0]!s}")
if args["--int-height"] or print_all:
to_print.append(f"{Fore.BLUE}{as_int_[1]!s}")
if args["--decimal"] or print_all:
to_print.append(f"{Fore.MAGENTA}{as_float_!s}")
    print(" ".join(to_print)) | Process CLI arguments and call appropriate functions. | Below is the instruction that describes the task:
### Input:
Process CLI arguments and call appropriate functions.
### Response:
def main():
"Process CLI arguments and call appropriate functions."
try:
args = docopt.docopt(__doc__, version=__about__.__version__)
except docopt.DocoptExit:
if len(sys.argv) > 1:
print(f"{Fore.RED}Invalid command syntax, "
f"check help:{Fore.RESET}\n")
print(__doc__)
sys.exit(1)
print_all = False
if not (args["--int-width"] or args["--int-height"] or args["--decimal"]):
print_all = True
width = float(args["WIDTH"])
height = float(args["HEIGHT"])
as_int_ = as_int(width, height)
as_float_ = as_float(width, height)
if args["--ndigits"]:
as_float_ = round(as_float_, int(args["--ndigits"]))
to_print = []
if args["--int-width"] or print_all:
to_print.append(f"{Fore.BLUE}{as_int_[0]!s}")
if args["--int-height"] or print_all:
to_print.append(f"{Fore.BLUE}{as_int_[1]!s}")
if args["--decimal"] or print_all:
to_print.append(f"{Fore.MAGENTA}{as_float_!s}")
print(" ".join(to_print)) |
def findSector(self,x,y):
'''
Finds the quadrilateral "sector" for each (x,y) point in the input.
Only called as a subroutine of _evaluate().
Parameters
----------
x : np.array
Values whose sector should be found.
y : np.array
Values whose sector should be found. Should be same size as x.
Returns
-------
x_pos : np.array
Sector x-coordinates for each point of the input, of the same size.
y_pos : np.array
Sector y-coordinates for each point of the input, of the same size.
'''
# Initialize the sector guess
m = x.size
x_pos_guess = (np.ones(m)*self.x_n/2).astype(int)
y_pos_guess = (np.ones(m)*self.y_n/2).astype(int)
# Define a function that checks whether a set of points violates a linear
# boundary defined by (x_bound_1,y_bound_1) and (x_bound_2,y_bound_2),
# where the latter is *COUNTER CLOCKWISE* from the former. Returns
# 1 if the point is outside the boundary and 0 otherwise.
violationCheck = lambda x_check,y_check,x_bound_1,y_bound_1,x_bound_2,y_bound_2 : (
(y_bound_2 - y_bound_1)*x_check - (x_bound_2 - x_bound_1)*y_check > x_bound_1*y_bound_2 - y_bound_1*x_bound_2 ) + 0
# Identify the correct sector for each point to be evaluated
these = np.ones(m,dtype=bool)
max_loops = self.x_n + self.y_n
loops = 0
while np.any(these) and loops < max_loops:
# Get coordinates for the four vertices: (xA,yA),...,(xD,yD)
x_temp = x[these]
y_temp = y[these]
xA = self.x_values[x_pos_guess[these],y_pos_guess[these]]
xB = self.x_values[x_pos_guess[these]+1,y_pos_guess[these]]
xC = self.x_values[x_pos_guess[these],y_pos_guess[these]+1]
xD = self.x_values[x_pos_guess[these]+1,y_pos_guess[these]+1]
yA = self.y_values[x_pos_guess[these],y_pos_guess[these]]
yB = self.y_values[x_pos_guess[these]+1,y_pos_guess[these]]
yC = self.y_values[x_pos_guess[these],y_pos_guess[these]+1]
yD = self.y_values[x_pos_guess[these]+1,y_pos_guess[these]+1]
# Check the "bounding box" for the sector: is this guess plausible?
move_down = (y_temp < np.minimum(yA,yB)) + 0
move_right = (x_temp > np.maximum(xB,xD)) + 0
move_up = (y_temp > np.maximum(yC,yD)) + 0
move_left = (x_temp < np.minimum(xA,xC)) + 0
# Check which boundaries are violated (and thus where to look next)
c = (move_down + move_right + move_up + move_left) == 0
move_down[c] = violationCheck(x_temp[c],y_temp[c],xA[c],yA[c],xB[c],yB[c])
move_right[c] = violationCheck(x_temp[c],y_temp[c],xB[c],yB[c],xD[c],yD[c])
move_up[c] = violationCheck(x_temp[c],y_temp[c],xD[c],yD[c],xC[c],yC[c])
move_left[c] = violationCheck(x_temp[c],y_temp[c],xC[c],yC[c],xA[c],yA[c])
# Update the sector guess based on the violations
x_pos_next = x_pos_guess[these] - move_left + move_right
x_pos_next[x_pos_next < 0] = 0
x_pos_next[x_pos_next > (self.x_n-2)] = self.x_n-2
y_pos_next = y_pos_guess[these] - move_down + move_up
y_pos_next[y_pos_next < 0] = 0
y_pos_next[y_pos_next > (self.y_n-2)] = self.y_n-2
# Check which sectors have not changed, and mark them as complete
no_move = np.array(np.logical_and(x_pos_guess[these] == x_pos_next, y_pos_guess[these] == y_pos_next))
x_pos_guess[these] = x_pos_next
y_pos_guess[these] = y_pos_next
temp = these.nonzero()
these[temp[0][no_move]] = False
# Move to the next iteration of the search
loops += 1
# Return the output
x_pos = x_pos_guess
y_pos = y_pos_guess
return x_pos, y_pos | Finds the quadrilateral "sector" for each (x,y) point in the input.
Only called as a subroutine of _evaluate().
Parameters
----------
x : np.array
Values whose sector should be found.
y : np.array
Values whose sector should be found. Should be same size as x.
Returns
-------
x_pos : np.array
Sector x-coordinates for each point of the input, of the same size.
y_pos : np.array
        Sector y-coordinates for each point of the input, of the same size. | Below is the instruction that describes the task:
### Input:
Finds the quadrilateral "sector" for each (x,y) point in the input.
Only called as a subroutine of _evaluate().
Parameters
----------
x : np.array
Values whose sector should be found.
y : np.array
Values whose sector should be found. Should be same size as x.
Returns
-------
x_pos : np.array
Sector x-coordinates for each point of the input, of the same size.
y_pos : np.array
Sector y-coordinates for each point of the input, of the same size.
### Response:
def findSector(self,x,y):
'''
Finds the quadrilateral "sector" for each (x,y) point in the input.
Only called as a subroutine of _evaluate().
Parameters
----------
x : np.array
Values whose sector should be found.
y : np.array
Values whose sector should be found. Should be same size as x.
Returns
-------
x_pos : np.array
Sector x-coordinates for each point of the input, of the same size.
y_pos : np.array
Sector y-coordinates for each point of the input, of the same size.
'''
# Initialize the sector guess
m = x.size
x_pos_guess = (np.ones(m)*self.x_n/2).astype(int)
y_pos_guess = (np.ones(m)*self.y_n/2).astype(int)
# Define a function that checks whether a set of points violates a linear
# boundary defined by (x_bound_1,y_bound_1) and (x_bound_2,y_bound_2),
# where the latter is *COUNTER CLOCKWISE* from the former. Returns
# 1 if the point is outside the boundary and 0 otherwise.
violationCheck = lambda x_check,y_check,x_bound_1,y_bound_1,x_bound_2,y_bound_2 : (
(y_bound_2 - y_bound_1)*x_check - (x_bound_2 - x_bound_1)*y_check > x_bound_1*y_bound_2 - y_bound_1*x_bound_2 ) + 0
# Identify the correct sector for each point to be evaluated
these = np.ones(m,dtype=bool)
max_loops = self.x_n + self.y_n
loops = 0
while np.any(these) and loops < max_loops:
# Get coordinates for the four vertices: (xA,yA),...,(xD,yD)
x_temp = x[these]
y_temp = y[these]
xA = self.x_values[x_pos_guess[these],y_pos_guess[these]]
xB = self.x_values[x_pos_guess[these]+1,y_pos_guess[these]]
xC = self.x_values[x_pos_guess[these],y_pos_guess[these]+1]
xD = self.x_values[x_pos_guess[these]+1,y_pos_guess[these]+1]
yA = self.y_values[x_pos_guess[these],y_pos_guess[these]]
yB = self.y_values[x_pos_guess[these]+1,y_pos_guess[these]]
yC = self.y_values[x_pos_guess[these],y_pos_guess[these]+1]
yD = self.y_values[x_pos_guess[these]+1,y_pos_guess[these]+1]
# Check the "bounding box" for the sector: is this guess plausible?
move_down = (y_temp < np.minimum(yA,yB)) + 0
move_right = (x_temp > np.maximum(xB,xD)) + 0
move_up = (y_temp > np.maximum(yC,yD)) + 0
move_left = (x_temp < np.minimum(xA,xC)) + 0
# Check which boundaries are violated (and thus where to look next)
c = (move_down + move_right + move_up + move_left) == 0
move_down[c] = violationCheck(x_temp[c],y_temp[c],xA[c],yA[c],xB[c],yB[c])
move_right[c] = violationCheck(x_temp[c],y_temp[c],xB[c],yB[c],xD[c],yD[c])
move_up[c] = violationCheck(x_temp[c],y_temp[c],xD[c],yD[c],xC[c],yC[c])
move_left[c] = violationCheck(x_temp[c],y_temp[c],xC[c],yC[c],xA[c],yA[c])
# Update the sector guess based on the violations
x_pos_next = x_pos_guess[these] - move_left + move_right
x_pos_next[x_pos_next < 0] = 0
x_pos_next[x_pos_next > (self.x_n-2)] = self.x_n-2
y_pos_next = y_pos_guess[these] - move_down + move_up
y_pos_next[y_pos_next < 0] = 0
y_pos_next[y_pos_next > (self.y_n-2)] = self.y_n-2
# Check which sectors have not changed, and mark them as complete
no_move = np.array(np.logical_and(x_pos_guess[these] == x_pos_next, y_pos_guess[these] == y_pos_next))
x_pos_guess[these] = x_pos_next
y_pos_guess[these] = y_pos_next
temp = these.nonzero()
these[temp[0][no_move]] = False
# Move to the next iteration of the search
loops += 1
# Return the output
x_pos = x_pos_guess
y_pos = y_pos_guess
return x_pos, y_pos |
def fo_pct_by_zone(self):
"""
Get the by team face-off win % by zone. Format is
:returns: dict ``{ 'home/away': { 'off/def/neut': % } }``
"""
bz = self.by_zone
return {
t: {
z: bz[t][z]['won']/(1.0*bz[t][z]['total']) if bz[t][z]['total'] else 0.0
for z in self.__zones
if z != 'all'
}
for t in [ 'home', 'away' ]
} | Get the by team face-off win % by zone. Format is
    :returns: dict ``{ 'home/away': { 'off/def/neut': % } }`` | Below is the instruction that describes the task:
### Input:
Get the by team face-off win % by zone. Format is
:returns: dict ``{ 'home/away': { 'off/def/neut': % } }``
### Response:
def fo_pct_by_zone(self):
"""
Get the by team face-off win % by zone. Format is
:returns: dict ``{ 'home/away': { 'off/def/neut': % } }``
"""
bz = self.by_zone
return {
t: {
z: bz[t][z]['won']/(1.0*bz[t][z]['total']) if bz[t][z]['total'] else 0.0
for z in self.__zones
if z != 'all'
}
for t in [ 'home', 'away' ]
} |
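The same percentage computation on made-up face-off counts, simplified to two zones so the zero-total guard is visible:

by_zone = {
    'home': {'off': {'won': 6, 'total': 10}, 'def': {'won': 0, 'total': 0}},
    'away': {'off': {'won': 4, 'total': 10}, 'def': {'won': 3, 'total': 5}},
}
pct = {
    t: {z: d['won'] / d['total'] if d['total'] else 0.0
        for z, d in by_zone[t].items()}
    for t in by_zone
}
print(pct['home'])  # {'off': 0.6, 'def': 0.0}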
def sub_path(self, path):
""" If this redirect is a regular expression, it will return a
rewritten version of `path`; otherwise returns the `new_path`. """
if not self.regular_expression:
return self.new_path
return re.sub(self.old_path, self.new_path, path) | If this redirect is a regular expression, it will return a
    rewritten version of `path`; otherwise returns the `new_path`. | Below is the instruction that describes the task:
### Input:
If this redirect is a regular expression, it will return a
rewritten version of `path`; otherwise returns the `new_path`.
### Response:
def sub_path(self, path):
""" If this redirect is a regular expression, it will return a
rewritten version of `path`; otherwise returns the `new_path`. """
if not self.regular_expression:
return self.new_path
return re.sub(self.old_path, self.new_path, path) |
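A standalone sketch of the regular-expression branch; the pattern and replacement below are made up and use a backreference the way old_path/new_path would:

import re

old_path = r"^/blog/(\d{4})/(.*)$"
new_path = r"/articles/\1/\2"
print(re.sub(old_path, new_path, "/blog/2014/hello-world"))
# -> /articles/2014/hello-world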
def check_terminate(self):
"""
Returns a Bool of whether to terminate.
Checks whether a satisfactory minimum has been found or whether
too many iterations have occurred.
"""
if not self._has_run:
return False
else:
#1-3. errtol, paramtol, model cosine low enough?
terminate = self.check_completion()
#4. too many iterations??
terminate |= (self._num_iter >= self.max_iter)
return terminate | Returns a Bool of whether to terminate.
Checks whether a satisfactory minimum has been found or whether
        too many iterations have occurred. | Below is the instruction that describes the task:
### Input:
Returns a Bool of whether to terminate.
Checks whether a satisfactory minimum has been found or whether
too many iterations have occurred.
### Response:
def check_terminate(self):
"""
Returns a Bool of whether to terminate.
Checks whether a satisfactory minimum has been found or whether
too many iterations have occurred.
"""
if not self._has_run:
return False
else:
#1-3. errtol, paramtol, model cosine low enough?
terminate = self.check_completion()
#4. too many iterations??
terminate |= (self._num_iter >= self.max_iter)
return terminate |
def add(self,dist):
"""Add `dist` if we ``can_add()`` it and it isn't already added"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key,[])
if dist not in dists:
dists.append(dist)
if dist.key in self._cache:
                _sort_dists(self._cache[dist.key]) | Add `dist` if we ``can_add()`` it and it isn't already added | Below is the instruction that describes the task:
### Input:
Add `dist` if we ``can_add()`` it and it isn't already added
### Response:
def add(self,dist):
"""Add `dist` if we ``can_add()`` it and it isn't already added"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key,[])
if dist not in dists:
dists.append(dist)
if dist.key in self._cache:
_sort_dists(self._cache[dist.key]) |
def override_temp(replacement):
"""
Monkey-patch tempfile.tempdir with replacement, ensuring it exists
"""
if not os.path.isdir(replacement):
os.makedirs(replacement)
saved = tempfile.tempdir
tempfile.tempdir = replacement
try:
yield
finally:
        tempfile.tempdir = saved | Monkey-patch tempfile.tempdir with replacement, ensuring it exists | Below is the instruction that describes the task:
### Input:
Monkey-patch tempfile.tempdir with replacement, ensuring it exists
### Response:
def override_temp(replacement):
"""
Monkey-patch tempfile.tempdir with replacement, ensuring it exists
"""
if not os.path.isdir(replacement):
os.makedirs(replacement)
saved = tempfile.tempdir
tempfile.tempdir = replacement
try:
yield
finally:
tempfile.tempdir = saved |
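A usage sketch, assuming override_temp above carries the @contextlib.contextmanager decorator (the decorator is not part of this row) and using a hypothetical directory:

import tempfile

with override_temp("/tmp/build-tmp"):
    print(tempfile.mkdtemp())    # created under /tmp/build-tmp
print(tempfile.gettempdir())     # previous tempdir behaviour restored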
def get_tracks(self, catalog, cache=True):
"""Get the tracks for a song given a catalog.
Args:
catalog (str): a string representing the catalog whose track you want to retrieve.
Returns:
A list of Track dicts.
Example:
>>> s = song.Song('SOWDASQ12A6310F24F')
>>> s.get_tracks('7digital')[0]
{u'catalog': u'7digital',
u'foreign_id': u'7digital:track:8445818',
u'id': u'TRJGNNY12903CC625C',
u'preview_url': u'http://previews.7digital.com/clips/34/8445818.clip.mp3',
u'release_image': u'http://cdn.7static.com/static/img/sleeveart/00/007/628/0000762838_200.jpg'}
>>>
"""
if not (cache and ('tracks' in self.cache) and (catalog in [td['catalog'] for td in self.cache['tracks']])):
kwargs = {
'bucket':['tracks', 'id:%s' % catalog],
}
response = self.get_attribute('profile', **kwargs)
if not 'tracks' in self.cache:
self.cache['tracks'] = []
# don't blow away the cache for other catalogs
potential_tracks = response['songs'][0].get('tracks', [])
existing_track_ids = [tr['foreign_id'] for tr in self.cache['tracks']]
new_tds = filter(lambda tr: tr['foreign_id'] not in existing_track_ids, potential_tracks)
self.cache['tracks'].extend(new_tds)
return filter(lambda tr: tr['catalog']==util.map_idspace(catalog), self.cache['tracks']) | Get the tracks for a song given a catalog.
Args:
catalog (str): a string representing the catalog whose track you want to retrieve.
Returns:
A list of Track dicts.
Example:
>>> s = song.Song('SOWDASQ12A6310F24F')
>>> s.get_tracks('7digital')[0]
{u'catalog': u'7digital',
u'foreign_id': u'7digital:track:8445818',
u'id': u'TRJGNNY12903CC625C',
u'preview_url': u'http://previews.7digital.com/clips/34/8445818.clip.mp3',
u'release_image': u'http://cdn.7static.com/static/img/sleeveart/00/007/628/0000762838_200.jpg'}
            >>> | Below is the instruction that describes the task:
### Input:
Get the tracks for a song given a catalog.
Args:
catalog (str): a string representing the catalog whose track you want to retrieve.
Returns:
A list of Track dicts.
Example:
>>> s = song.Song('SOWDASQ12A6310F24F')
>>> s.get_tracks('7digital')[0]
{u'catalog': u'7digital',
u'foreign_id': u'7digital:track:8445818',
u'id': u'TRJGNNY12903CC625C',
u'preview_url': u'http://previews.7digital.com/clips/34/8445818.clip.mp3',
u'release_image': u'http://cdn.7static.com/static/img/sleeveart/00/007/628/0000762838_200.jpg'}
>>>
### Response:
def get_tracks(self, catalog, cache=True):
"""Get the tracks for a song given a catalog.
Args:
catalog (str): a string representing the catalog whose track you want to retrieve.
Returns:
A list of Track dicts.
Example:
>>> s = song.Song('SOWDASQ12A6310F24F')
>>> s.get_tracks('7digital')[0]
{u'catalog': u'7digital',
u'foreign_id': u'7digital:track:8445818',
u'id': u'TRJGNNY12903CC625C',
u'preview_url': u'http://previews.7digital.com/clips/34/8445818.clip.mp3',
u'release_image': u'http://cdn.7static.com/static/img/sleeveart/00/007/628/0000762838_200.jpg'}
>>>
"""
if not (cache and ('tracks' in self.cache) and (catalog in [td['catalog'] for td in self.cache['tracks']])):
kwargs = {
'bucket':['tracks', 'id:%s' % catalog],
}
response = self.get_attribute('profile', **kwargs)
if not 'tracks' in self.cache:
self.cache['tracks'] = []
# don't blow away the cache for other catalogs
potential_tracks = response['songs'][0].get('tracks', [])
existing_track_ids = [tr['foreign_id'] for tr in self.cache['tracks']]
new_tds = filter(lambda tr: tr['foreign_id'] not in existing_track_ids, potential_tracks)
self.cache['tracks'].extend(new_tds)
return filter(lambda tr: tr['catalog']==util.map_idspace(catalog), self.cache['tracks']) |
def collect(self, name, arr):
"""Callback function for collecting layer output NDArrays."""
name = py_str(name)
if self.include_layer is not None and not self.include_layer(name):
return
handle = ctypes.cast(arr, NDArrayHandle)
arr = NDArray(handle, writable=False).copyto(cpu())
if self.logger is not None:
self.logger.info("Collecting layer %s output of shape %s" % (name, arr.shape))
if name in self.nd_dict:
self.nd_dict[name].append(arr)
else:
            self.nd_dict[name] = [arr] | Callback function for collecting layer output NDArrays. | Below is the instruction that describes the task:
### Input:
Callback function for collecting layer output NDArrays.
### Response:
def collect(self, name, arr):
"""Callback function for collecting layer output NDArrays."""
name = py_str(name)
if self.include_layer is not None and not self.include_layer(name):
return
handle = ctypes.cast(arr, NDArrayHandle)
arr = NDArray(handle, writable=False).copyto(cpu())
if self.logger is not None:
self.logger.info("Collecting layer %s output of shape %s" % (name, arr.shape))
if name in self.nd_dict:
self.nd_dict[name].append(arr)
else:
self.nd_dict[name] = [arr] |
def mergecn(args):
"""
%prog mergecn FACE.csv
Compile matrix of GC-corrected copy numbers. Place a bunch of folders in
csv file. Each folder will be scanned, one chromosomes after another.
"""
p = OptionParser(mergecn.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
csvfile, = args
samples = [x.replace("-cn", "").strip().strip("/") for x in open(csvfile)]
betadir = "beta"
mkdir(betadir)
for seqid in allsomes:
names = [op.join(s + "-cn", "{}.{}.cn".
format(op.basename(s), seqid)) for s in samples]
arrays = [np.fromfile(name, dtype=np.float) for name in names]
shapes = [x.shape[0] for x in arrays]
med_shape = np.median(shapes)
arrays = [x for x in arrays if x.shape[0] == med_shape]
ploidy = 2 if seqid not in ("chrY", "chrM") else 1
if seqid in sexsomes:
chr_med = [np.median([x for x in a if x > 0]) for a in arrays]
chr_med = np.array(chr_med)
idx = get_kmeans(chr_med, k=2)
zero_med = np.median(chr_med[idx == 0])
one_med = np.median(chr_med[idx == 1])
logging.debug("K-means with {} c0:{} c1:{}"
.format(seqid, zero_med, one_med))
higher_idx = 1 if one_med > zero_med else 0
# Use the higher mean coverage component
arrays = np.array(arrays)[idx == higher_idx]
arrays = [[x] for x in arrays]
ar = np.concatenate(arrays)
print(seqid, ar.shape)
rows, columns = ar.shape
beta = []
std = []
for j in xrange(columns):
a = ar[:, j]
beta.append(np.median(a))
std.append(np.std(a) / np.mean(a))
beta = np.array(beta) / ploidy
betafile = op.join(betadir, "{}.beta".format(seqid))
beta.tofile(betafile)
stdfile = op.join(betadir, "{}.std".format(seqid))
std = np.array(std)
std.tofile(stdfile)
logging.debug("Written to `{}`".format(betafile))
ar.tofile("{}.bin".format(seqid)) | %prog mergecn FACE.csv
Compile matrix of GC-corrected copy numbers. Place a bunch of folders in
csv file. Each folder will be scanned, one chromosomes after another. | Below is the instruction that describes the task:
### Input:
%prog mergecn FACE.csv
Compile matrix of GC-corrected copy numbers. Place a bunch of folders in
csv file. Each folder will be scanned, one chromosomes after another.
### Response:
def mergecn(args):
"""
%prog mergecn FACE.csv
Compile matrix of GC-corrected copy numbers. Place a bunch of folders in
csv file. Each folder will be scanned, one chromosomes after another.
"""
p = OptionParser(mergecn.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
csvfile, = args
samples = [x.replace("-cn", "").strip().strip("/") for x in open(csvfile)]
betadir = "beta"
mkdir(betadir)
for seqid in allsomes:
names = [op.join(s + "-cn", "{}.{}.cn".
format(op.basename(s), seqid)) for s in samples]
arrays = [np.fromfile(name, dtype=np.float) for name in names]
shapes = [x.shape[0] for x in arrays]
med_shape = np.median(shapes)
arrays = [x for x in arrays if x.shape[0] == med_shape]
ploidy = 2 if seqid not in ("chrY", "chrM") else 1
if seqid in sexsomes:
chr_med = [np.median([x for x in a if x > 0]) for a in arrays]
chr_med = np.array(chr_med)
idx = get_kmeans(chr_med, k=2)
zero_med = np.median(chr_med[idx == 0])
one_med = np.median(chr_med[idx == 1])
logging.debug("K-means with {} c0:{} c1:{}"
.format(seqid, zero_med, one_med))
higher_idx = 1 if one_med > zero_med else 0
# Use the higher mean coverage component
arrays = np.array(arrays)[idx == higher_idx]
arrays = [[x] for x in arrays]
ar = np.concatenate(arrays)
print(seqid, ar.shape)
rows, columns = ar.shape
beta = []
std = []
for j in xrange(columns):
a = ar[:, j]
beta.append(np.median(a))
std.append(np.std(a) / np.mean(a))
beta = np.array(beta) / ploidy
betafile = op.join(betadir, "{}.beta".format(seqid))
beta.tofile(betafile)
stdfile = op.join(betadir, "{}.std".format(seqid))
std = np.array(std)
std.tofile(stdfile)
logging.debug("Written to `{}`".format(betafile))
ar.tofile("{}.bin".format(seqid)) |
def add_event(self, event):
"""
Adds an IEvent event to this command set.
:param event: an event instance to be added
"""
self._events.append(event)
self._events_by_name[event.get_name] = event | Adds an IEvent event to this command set.
:param event: an event instance to be added | Below is the instruction that describes the task:
### Input:
Adds an IEvent event to this command set.
:param event: an event instance to be added
### Response:
def add_event(self, event):
"""
Adds an IEvent event to this command set.
:param event: an event instance to be added
"""
self._events.append(event)
self._events_by_name[event.get_name] = event |
def declare_func(self, id_, lineno, type_=None):
""" Declares a function in the current scope.
Checks whether the id exist or not (error if exists).
And creates the entry at the symbol table.
"""
if not self.check_class(id_, 'function', lineno):
entry = self.get_entry(id_) # Must not exist or have _class = None or Function and declared = False
an = 'an' if entry.class_.lower()[0] in 'aeio' else 'a'
syntax_error(lineno, "'%s' already declared as %s %s at %i" % (id_, an, entry.class_, entry.lineno))
return None
entry = self.get_entry(id_) # Must not exist or have _class = None or Function and declared = False
if entry is not None:
if entry.declared and not entry.forwarded:
syntax_error(lineno, "Duplicate function name '%s', previously defined at %i" % (id_, entry.lineno))
return None
if entry.class_ != CLASS.unknown and entry.callable is False: # HINT: Must use is False here.
syntax_error_not_array_nor_func(lineno, id_)
return None
if id_[-1] in DEPRECATED_SUFFIXES and entry.type_ != self.basic_types[SUFFIX_TYPE[id_[-1]]]:
syntax_error_func_type_mismatch(lineno, entry)
if entry.token == 'VAR': # This was a function used in advance
symbols.VAR.to_function(entry, lineno=lineno)
entry.mangled = '%s_%s' % (self.mangle, entry.name) # HINT: mangle for nested scopes
else:
entry = self.declare(id_, lineno, symbols.FUNCTION(id_, lineno, type_=type_))
if entry.forwarded:
entry.forwarded = False # No longer forwarded
old_type = entry.type_ # Remembers the old type
if entry.type_ is not None:
if entry.type_ != old_type:
syntax_error_func_type_mismatch(lineno, entry)
else:
entry.type_ = old_type
else:
entry.params_size = 0 # Size of parameters
entry.locals_size = 0 # Size of local variables
return entry | Declares a function in the current scope.
Checks whether the id exist or not (error if exists).
And creates the entry at the symbol table. | Below is the instruction that describes the task:
### Input:
Declares a function in the current scope.
Checks whether the id exist or not (error if exists).
And creates the entry at the symbol table.
### Response:
def declare_func(self, id_, lineno, type_=None):
""" Declares a function in the current scope.
Checks whether the id exist or not (error if exists).
And creates the entry at the symbol table.
"""
if not self.check_class(id_, 'function', lineno):
entry = self.get_entry(id_) # Must not exist or have _class = None or Function and declared = False
an = 'an' if entry.class_.lower()[0] in 'aeio' else 'a'
syntax_error(lineno, "'%s' already declared as %s %s at %i" % (id_, an, entry.class_, entry.lineno))
return None
entry = self.get_entry(id_) # Must not exist or have _class = None or Function and declared = False
if entry is not None:
if entry.declared and not entry.forwarded:
syntax_error(lineno, "Duplicate function name '%s', previously defined at %i" % (id_, entry.lineno))
return None
if entry.class_ != CLASS.unknown and entry.callable is False: # HINT: Must use is False here.
syntax_error_not_array_nor_func(lineno, id_)
return None
if id_[-1] in DEPRECATED_SUFFIXES and entry.type_ != self.basic_types[SUFFIX_TYPE[id_[-1]]]:
syntax_error_func_type_mismatch(lineno, entry)
if entry.token == 'VAR': # This was a function used in advance
symbols.VAR.to_function(entry, lineno=lineno)
entry.mangled = '%s_%s' % (self.mangle, entry.name) # HINT: mangle for nested scopes
else:
entry = self.declare(id_, lineno, symbols.FUNCTION(id_, lineno, type_=type_))
if entry.forwarded:
entry.forwarded = False # No longer forwarded
old_type = entry.type_ # Remembers the old type
if entry.type_ is not None:
if entry.type_ != old_type:
syntax_error_func_type_mismatch(lineno, entry)
else:
entry.type_ = old_type
else:
entry.params_size = 0 # Size of parameters
entry.locals_size = 0 # Size of local variables
return entry |
def with_tz(request):
"""
Get the time with TZ enabled
"""
dt = datetime.now()
t = Template('{% load tz %}{% localtime on %}{% get_current_timezone as TIME_ZONE %}{{ TIME_ZONE }}{% endlocaltime %}')
c = RequestContext(request)
response = t.render(c)
return HttpResponse(response) | Get the time with TZ enabled | Below is the instruction that describes the task:
### Input:
Get the time with TZ enabled
### Response:
def with_tz(request):
"""
Get the time with TZ enabled
"""
dt = datetime.now()
t = Template('{% load tz %}{% localtime on %}{% get_current_timezone as TIME_ZONE %}{{ TIME_ZONE }}{% endlocaltime %}')
c = RequestContext(request)
response = t.render(c)
return HttpResponse(response) |
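A hedged wiring sketch (not from the project): one way to expose the view above in a Django 2+ urls.py; the route string is made up.

from django.urls import path
from . import views

urlpatterns = [
    path('with_tz/', views.with_tz),   # hypothetical route for the view above
]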
def place_new_order(self, stock, price, qty, direction, order_type):
"""Place an order for a stock.
https://starfighter.readme.io/docs/place-new-order
"""
url_fragment = 'venues/{venue}/stocks/{stock}/orders'.format(
venue=self.venue,
stock=stock,
)
data = {
"stock": stock,
"price": price,
"venue": self.venue,
"account": self.account,
"qty": qty,
"direction": direction,
"orderType": order_type,
}
url = urljoin(self.base_url, url_fragment)
resp = self.session.post(url, json=data)
return resp.json() | Place an order for a stock.
https://starfighter.readme.io/docs/place-new-order | Below is the instruction that describes the task:
### Input:
Place an order for a stock.
https://starfighter.readme.io/docs/place-new-order
### Response:
def place_new_order(self, stock, price, qty, direction, order_type):
"""Place an order for a stock.
https://starfighter.readme.io/docs/place-new-order
"""
url_fragment = 'venues/{venue}/stocks/{stock}/orders'.format(
venue=self.venue,
stock=stock,
)
data = {
"stock": stock,
"price": price,
"venue": self.venue,
"account": self.account,
"qty": qty,
"direction": direction,
"orderType": order_type,
}
url = urljoin(self.base_url, url_fragment)
resp = self.session.post(url, json=data)
return resp.json() |
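A hedged usage sketch: `client` stands for an already-configured instance of the surrounding Stockfighter API wrapper (venue, account and session set elsewhere); the symbol and the response fields read at the end are assumptions about the API payload.

order = client.place_new_order(
    stock="FOOBAR",        # hypothetical stock symbol
    price=5100,            # integer price, in the venue's smallest unit
    qty=100,
    direction="buy",
    order_type="limit",
)
print(order.get("ok"), order.get("id"))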
def pretty(price, currency, *, abbrev=True, trim=True):
""" return format price with symbol. Example format(100, 'USD') return '$100'
pretty(price, currency, abbrev=True, trim=False)
abbrev:
True: print value + symbol. Symbol can either be placed before or after value
False: print value + currency code. currency code is placed behind value
trim:
True: trim float value to the maximum digit numbers of that currency
False: keep number of decimal in initial argument """
currency = validate_currency(currency)
price = validate_price(price)
space = '' if nospace(currency) else ' '
fmtstr = ''
if trim:
fmtstr = '{:0,.{x}f}'.format(price, x=decimals(currency)).rstrip('0').rstrip('.')
else:
fmtstr = '{:0,}'.format(price).rstrip('0').rstrip('.')
if abbrev: # use currency symbol
if issuffix(currency):
return fmtstr + space + symbol(currency)
return symbol(currency, native=False) + space + fmtstr
return fmtstr + ' ' + code(currency) | return format price with symbol. Example format(100, 'USD') return '$100'
pretty(price, currency, abbrev=True, trim=False)
abbrev:
True: print value + symbol. Symbol can either be placed before or after value
False: print value + currency code. currency code is placed behind value
trim:
True: trim float value to the maximum digit numbers of that currency
False: keep number of decimal in initial argument | Below is the instruction that describes the task:
### Input:
return format price with symbol. Example format(100, 'USD') return '$100'
pretty(price, currency, abbrev=True, trim=False)
abbrev:
True: print value + symbol. Symbol can either be placed before or after value
False: print value + currency code. currency code is placed behind value
trim:
True: trim float value to the maximum digit numbers of that currency
False: keep number of decimal in initial argument
### Response:
def pretty(price, currency, *, abbrev=True, trim=True):
""" return format price with symbol. Example format(100, 'USD') return '$100'
pretty(price, currency, abbrev=True, trim=False)
abbrev:
True: print value + symbol. Symbol can either be placed before or after value
False: print value + currency code. currency code is placed behind value
trim:
True: trim float value to the maximum digit numbers of that currency
False: keep number of decimal in initial argument """
currency = validate_currency(currency)
price = validate_price(price)
space = '' if nospace(currency) else ' '
fmtstr = ''
if trim:
fmtstr = '{:0,.{x}f}'.format(price, x=decimals(currency)).rstrip('0').rstrip('.')
else:
fmtstr = '{:0,}'.format(price).rstrip('0').rstrip('.')
if abbrev: # use currency symbol
if issuffix(currency):
return fmtstr + space + symbol(currency)
return symbol(currency, native=False) + space + fmtstr
return fmtstr + ' ' + code(currency) |
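A hedged usage sketch; the exact strings depend on the module's currency table (symbol(), decimals(), nospace(), issuffix()), so the expected outputs in the comments are best guesses rather than verified values.

print(pretty(1234.5, 'USD'))                 # likely '$1,234.5'
print(pretty(1234.5, 'USD', abbrev=False))   # likely '1,234.5 USD'
print(pretty(1234, 'JPY'))                   # symbol placement depends on the currency data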
def _recv_ack(self, method_frame):
'''Receive an ack from the broker.'''
if self._ack_listener:
delivery_tag = method_frame.args.read_longlong()
multiple = method_frame.args.read_bit()
if multiple:
while self._last_ack_id < delivery_tag:
self._last_ack_id += 1
self._ack_listener(self._last_ack_id)
else:
self._last_ack_id = delivery_tag
self._ack_listener(self._last_ack_id) | Receive an ack from the broker. | Below is the instruction that describes the task:
### Input:
Receive an ack from the broker.
### Response:
def _recv_ack(self, method_frame):
'''Receive an ack from the broker.'''
if self._ack_listener:
delivery_tag = method_frame.args.read_longlong()
multiple = method_frame.args.read_bit()
if multiple:
while self._last_ack_id < delivery_tag:
self._last_ack_id += 1
self._ack_listener(self._last_ack_id)
else:
self._last_ack_id = delivery_tag
self._ack_listener(self._last_ack_id) |
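A self-contained sketch of the cumulative-ack semantics the loop above implements (it mirrors the logic only, it is not the library's API): a "multiple" ack fires the listener once for every outstanding delivery tag up to and including the acked one.

def expand_ack(last_ack_id, delivery_tag, multiple, on_ack):
    if multiple:
        while last_ack_id < delivery_tag:
            last_ack_id += 1
            on_ack(last_ack_id)        # one callback per outstanding tag
    else:
        last_ack_id = delivery_tag
        on_ack(last_ack_id)
    return last_ack_id

acked = []
expand_ack(2, 5, True, acked.append)   # acked == [3, 4, 5]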
def bbox(self):
"""BBox"""
return self.left, self.top, self.right, self.bottom | BBox | Below is the instruction that describes the task:
### Input:
BBox
### Response:
def bbox(self):
"""BBox"""
return self.left, self.top, self.right, self.bottom |
def inverse(d):
"""
reverse the k:v pairs
"""
output = {}
for k, v in unwrap(d).items():
output[v] = output.get(v, [])
output[v].append(k)
return output | reverse the k:v pairs | Below is the instruction that describes the task:
### Input:
reverse the k:v pairs
### Response:
def inverse(d):
"""
reverse the k:v pairs
"""
output = {}
for k, v in unwrap(d).items():
output[v] = output.get(v, [])
output[v].append(k)
return output |
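A usage sketch, assuming unwrap() (which comes from the surrounding library) passes a plain dict through unchanged.

d = {"a": 1, "b": 1, "c": 2}
print(inverse(d))    # {1: ['a', 'b'], 2: ['c']}  (dict ordering may vary)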
def del_object_from_parent(self):
""" Delete object from parent object. """
if self.parent:
self.parent.objects.pop(self.ref) | Delete object from parent object. | Below is the instruction that describes the task:
### Input:
Delete object from parent object.
### Response:
def del_object_from_parent(self):
""" Delete object from parent object. """
if self.parent:
self.parent.objects.pop(self.ref) |
def parse_post(self, response):
'''
Fetch the article content from the concrete article URLs produced in :meth:`.ZhihuDailySpider.parse`,
format it, and fill the result into the object attribute ``item_list``
:param Response response: the request response object passed in by ``Scrapy``
'''
content = json.loads(response.body.decode(), encoding='UTF-8')
post = response.meta['post']
post['origin_url'] = content.get('share_url', '')
if not all([post['origin_url']]):
raise ValueError('原文地址为空')
post['title'] = html.escape(content.get('title', ''))
if not all([post['title']]):
raise ValueError('文章标题为空 - {}'.format(post.get('origin_url')))
# Handle the special case where the type field is 1, i.e. the article is an off-site repost
if content.get('type') == 1:
self.logger.warn('遇到站外文章,单独处理 - {}'.format(post['title']))
return post
soup = BeautifulSoup(content.get('body', ''), 'lxml')
author_obj = soup.select('span.author')
self.logger.debug(author_obj)
if author_obj:
author_list = []
for author in author_obj:
author_list.append(
author.string.rstrip(',, ').replace(',', ','))
author_list = list(set(author_list))
post['author'] = html.escape(','.join(author_list))
post['content'] = str(soup.div)
# Continue filling in the post data
image_back = content.get('images', [None])[0]
if image_back:
post['meta']['moear.cover_image_slug'] = \
content.get('image', image_back)
self.logger.debug(post) | Fetch the article content from the concrete article URLs produced in :meth:`.ZhihuDailySpider.parse`,
format it, and fill the result into the object attribute ``item_list``
:param Response response: the request response object passed in by ``Scrapy`` | Below is the instruction that describes the task:
### Input:
Fetch the article content from the concrete article URLs produced in :meth:`.ZhihuDailySpider.parse`,
format it, and fill the result into the object attribute ``item_list``
:param Response response: the request response object passed in by ``Scrapy``
### Response:
def parse_post(self, response):
'''
Fetch the article content from the concrete article URLs produced in :meth:`.ZhihuDailySpider.parse`,
format it, and fill the result into the object attribute ``item_list``
:param Response response: the request response object passed in by ``Scrapy``
'''
content = json.loads(response.body.decode(), encoding='UTF-8')
post = response.meta['post']
post['origin_url'] = content.get('share_url', '')
if not all([post['origin_url']]):
raise ValueError('原文地址为空')
post['title'] = html.escape(content.get('title', ''))
if not all([post['title']]):
raise ValueError('文章标题为空 - {}'.format(post.get('origin_url')))
# Handle the special case where the type field is 1, i.e. the article is an off-site repost
if content.get('type') == 1:
self.logger.warn('遇到站外文章,单独处理 - {}'.format(post['title']))
return post
soup = BeautifulSoup(content.get('body', ''), 'lxml')
author_obj = soup.select('span.author')
self.logger.debug(author_obj)
if author_obj:
author_list = []
for author in author_obj:
author_list.append(
author.string.rstrip(',, ').replace(',', ','))
author_list = list(set(author_list))
post['author'] = html.escape(','.join(author_list))
post['content'] = str(soup.div)
# Continue filling in the post data
image_back = content.get('images', [None])[0]
if image_back:
post['meta']['moear.cover_image_slug'] = \
content.get('image', image_back)
self.logger.debug(post) |
def _call_and_store(getter_func, data, field_name, error_store, index=None):
"""Call ``getter_func`` with ``data`` as its argument, and store any `ValidationErrors`.
:param callable getter_func: Function for getting the serialized/deserialized
value from ``data``.
:param data: The data passed to ``getter_func``.
:param str field_name: Field name.
:param int index: Index of the item being validated, if validating a collection,
otherwise `None`.
"""
try:
value = getter_func(data)
except ValidationError as err:
error_store.store_error(err.messages, field_name, index=index)
# When a Nested field fails validation, the marshalled data is stored
# on the ValidationError's valid_data attribute
return err.valid_data or missing
return value | Call ``getter_func`` with ``data`` as its argument, and store any `ValidationErrors`.
:param callable getter_func: Function for getting the serialized/deserialized
value from ``data``.
:param data: The data passed to ``getter_func``.
:param str field_name: Field name.
:param int index: Index of the item being validated, if validating a collection,
otherwise `None`. | Below is the instruction that describes the task:
### Input:
Call ``getter_func`` with ``data`` as its argument, and store any `ValidationErrors`.
:param callable getter_func: Function for getting the serialized/deserialized
value from ``data``.
:param data: The data passed to ``getter_func``.
:param str field_name: Field name.
:param int index: Index of the item being validated, if validating a collection,
otherwise `None`.
### Response:
def _call_and_store(getter_func, data, field_name, error_store, index=None):
"""Call ``getter_func`` with ``data`` as its argument, and store any `ValidationErrors`.
:param callable getter_func: Function for getting the serialized/deserialized
value from ``data``.
:param data: The data passed to ``getter_func``.
:param str field_name: Field name.
:param int index: Index of the item being validated, if validating a collection,
otherwise `None`.
"""
try:
value = getter_func(data)
except ValidationError as err:
error_store.store_error(err.messages, field_name, index=index)
# When a Nested field fails validation, the marshalled data is stored
# on the ValidationError's valid_data attribute
return err.valid_data or missing
return value |
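A hedged usage sketch with a stand-in error store: fields and the default error message come from marshmallow, and DemoErrorStore only imitates the store_error() interface of the real ErrorStore.

from marshmallow import fields

class DemoErrorStore:                          # minimal stand-in, not marshmallow's class
    def __init__(self):
        self.errors = {}
    def store_error(self, messages, field_name, index=None):
        self.errors.setdefault(field_name, []).extend(messages)

store = DemoErrorStore()
getter = lambda value: fields.Integer().deserialize(value)
_call_and_store(getter, "not a number", "age", store)
print(store.errors)    # e.g. {'age': ['Not a valid integer.']}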
def fetchall(self):
"""Fetch all rows."""
result = self.query.result()
return [row.values() for row in result] | Fetch all rows. | Below is the instruction that describes the task:
### Input:
Fetch all rows.
### Response:
def fetchall(self):
"""Fetch all rows."""
result = self.query.result()
return [row.values() for row in result] |
async def start(request):
"""
Begins the session manager for factory calibration, if a session is not
already in progress, or if the "force" key is specified in the request. To
force, use the following body:
{
"force": true
}
:return: The current session ID token or an error message
"""
global session
try:
body = await request.json()
except json.decoder.JSONDecodeError:
# Body will be null for requests without parameters (normal operation)
log.debug("No body in {}".format(request))
body = {}
if not session or body.get('force'):
hardware = hw_from_req(request)
if body.get('force') and session:
await release(data={})
session = SessionManager(hardware)
res = init_pipette()
if res:
status = 201
data = {'token': session.id, 'pipette': res}
else:
session = None
status = 403
data = {'message': 'Error, pipette not recognized'}
else:
data = {'message': 'Error, session in progress. Use "force" key in'
' request body to override'}
status = 409
return web.json_response(data, status=status) | Begins the session manager for factory calibration, if a session is not
already in progress, or if the "force" key is specified in the request. To
force, use the following body:
{
"force": true
}
:return: The current session ID token or an error message | Below is the instruction that describes the task:
### Input:
Begins the session manager for factory calibration, if a session is not
already in progress, or if the "force" key is specified in the request. To
force, use the following body:
{
"force": true
}
:return: The current session ID token or an error message
### Response:
async def start(request):
"""
Begins the session manager for factory calibration, if a session is not
already in progress, or if the "force" key is specified in the request. To
force, use the following body:
{
"force": true
}
:return: The current session ID token or an error message
"""
global session
try:
body = await request.json()
except json.decoder.JSONDecodeError:
# Body will be null for requests without parameters (normal operation)
log.debug("No body in {}".format(request))
body = {}
if not session or body.get('force'):
hardware = hw_from_req(request)
if body.get('force') and session:
await release(data={})
session = SessionManager(hardware)
res = init_pipette()
if res:
status = 201
data = {'token': session.id, 'pipette': res}
else:
session = None
status = 403
data = {'message': 'Error, pipette not recognized'}
else:
data = {'message': 'Error, session in progress. Use "force" key in'
' request body to override'}
status = 409
return web.json_response(data, status=status) |
def parse(self, data):
"""Returns a list of path template segments parsed from data.
Args:
data: A path template string.
Returns:
A list of _Segment.
"""
self.binding_var_count = 0
self.segment_count = 0
segments = self.parser.parse(data)
# Validation step: checks that there are no nested bindings.
path_wildcard = False
for segment in segments:
if segment.kind == _TERMINAL and segment.literal == '**':
if path_wildcard:
raise ValidationException(
'validation error: path template cannot contain more '
'than one path wildcard')
path_wildcard = True
return segments | Returns a list of path template segments parsed from data.
Args:
data: A path template string.
Returns:
A list of _Segment. | Below is the instruction that describes the task:
### Input:
Returns a list of path template segments parsed from data.
Args:
data: A path template string.
Returns:
A list of _Segment.
### Response:
def parse(self, data):
"""Returns a list of path template segments parsed from data.
Args:
data: A path template string.
Returns:
A list of _Segment.
"""
self.binding_var_count = 0
self.segment_count = 0
segments = self.parser.parse(data)
# Validation step: checks that there are no nested bindings.
path_wildcard = False
for segment in segments:
if segment.kind == _TERMINAL and segment.literal == '**':
if path_wildcard:
raise ValidationException(
'validation error: path template cannot contain more '
'than one path wildcard')
path_wildcard = True
return segments |
def get_client(client_id):
"""Load the client.
Needed for grant_type client_credentials.
Add support for OAuth client_credentials access type, with user
inactivation support.
:param client_id: The client ID.
:returns: The client instance or ``None``.
"""
client = Client.query.get(client_id)
if client and client.user.active:
return client | Load the client.
Needed for grant_type client_credentials.
Add support for OAuth client_credentials access type, with user
inactivation support.
:param client_id: The client ID.
:returns: The client instance or ``None``. | Below is the instruction that describes the task:
### Input:
Load the client.
Needed for grant_type client_credentials.
Add support for OAuth client_credentials access type, with user
inactivation support.
:param client_id: The client ID.
:returns: The client instance or ``None``.
### Response:
def get_client(client_id):
"""Load the client.
Needed for grant_type client_credentials.
Add support for OAuth client_credentials access type, with user
inactivation support.
:param client_id: The client ID.
:returns: The client instance or ``None``.
"""
client = Client.query.get(client_id)
if client and client.user.active:
return client |
def getMechanismName(self):
"""Return the authentication mechanism name."""
if self._server_side:
mech = self._authenticator.current_mech
return mech.getMechanismName() if mech else None
else:
return getattr(self._authenticator, 'authMech', None) | Return the authentication mechanism name. | Below is the instruction that describes the task:
### Input:
Return the authentication mechanism name.
### Response:
def getMechanismName(self):
"""Return the authentication mechanism name."""
if self._server_side:
mech = self._authenticator.current_mech
return mech.getMechanismName() if mech else None
else:
return getattr(self._authenticator, 'authMech', None) |
def write_object_array(f, data, options):
""" Writes an array of objects recursively.
Writes the elements of the given object array recursively in the
HDF5 Group ``options.group_for_references`` and returns an
``h5py.Reference`` array to all the elements.
Parameters
----------
f : h5py.File
The HDF5 file handle that is open.
data : numpy.ndarray of objects
Numpy object array to write the elements of.
options : hdf5storage.core.Options
hdf5storage options object.
Returns
-------
obj_array : numpy.ndarray of h5py.Reference
A reference array pointing to all the elements written to the
HDF5 file. For those that couldn't be written, the respective
element points to the canonical empty.
Raises
------
TypeNotMatlabCompatibleError
If writing a type not compatible with MATLAB and
`options.action_for_matlab_incompatible` is set to ``'error'``.
See Also
--------
read_object_array
hdf5storage.Options.group_for_references
h5py.Reference
"""
# We need to grab the special reference dtype and make an empty
# array to store all the references in.
ref_dtype = h5py.special_dtype(ref=h5py.Reference)
data_refs = np.zeros(shape=data.shape, dtype='object')
# We need to make sure that the group to hold references is present,
# and create it if it isn't.
if options.group_for_references not in f:
f.create_group(options.group_for_references)
grp2 = f[options.group_for_references]
if not isinstance(grp2, h5py.Group):
del f[options.group_for_references]
f.create_group(options.group_for_references)
grp2 = f[options.group_for_references]
# The Dataset 'a' needs to be present as the canonical empty. It is
# just an np.uint32/64([0, 0]) with a MATLAB_class of
# 'canonical empty' and the 'MATLAB_empty' attribute set. If it
# isn't present or is incorrectly formatted, it is created
# truncating anything previously there.
try:
dset_a = grp2['a']
if dset_a.shape != (2,) \
or not dset_a.dtype.name.startswith('uint') \
or np.any(dset_a[...] != np.uint64([0, 0])) \
or get_attribute_string(dset_a, 'MATLAB_class') != \
'canonical empty' \
or get_attribute(dset_a, 'MATLAB_empty') != 1:
del grp2['a']
dset_a = grp2.create_dataset('a', data=np.uint64([0, 0]))
set_attribute_string(dset_a, 'MATLAB_class',
'canonical empty')
set_attribute(dset_a, 'MATLAB_empty',
np.uint8(1))
except:
dset_a = grp2.create_dataset('a', data=np.uint64([0, 0]))
set_attribute_string(dset_a, 'MATLAB_class',
'canonical empty')
set_attribute(dset_a, 'MATLAB_empty',
np.uint8(1))
# Go through all the elements of data and write them, grabbing their
# references and putting them in data_refs. They will be put in
# group_for_references, which is also what the H5PATH needs to be
# set to if we are doing MATLAB compatibility (otherwise, the
# attribute needs to be deleted). If an element can't be written
# (doing matlab compatibility, but it isn't compatible with matlab
# and action_for_matlab_incompatible option is True), the reference
# to the canonical empty will be used for the reference array to
# point to.
grp2name = grp2.name
for index, x in np.ndenumerate(data):
name_for_ref = next_unused_name_in_group(grp2, 16)
write_data(f, grp2, name_for_ref, x, None, options)
try:
dset = grp2[name_for_ref]
data_refs[index] = dset.ref
if options.matlab_compatible:
set_attribute_string(dset,
'H5PATH', grp2name)
else:
del_attribute(dset, 'H5PATH')
except:
data_refs[index] = dset_a.ref
# Now, the dtype needs to be changed to the reference type and the
# whole thing copied over to data_to_store.
return data_refs.astype(ref_dtype).copy() | Writes an array of objects recursively.
Writes the elements of the given object array recursively in the
HDF5 Group ``options.group_for_references`` and returns an
``h5py.Reference`` array to all the elements.
Parameters
----------
f : h5py.File
The HDF5 file handle that is open.
data : numpy.ndarray of objects
Numpy object array to write the elements of.
options : hdf5storage.core.Options
hdf5storage options object.
Returns
-------
obj_array : numpy.ndarray of h5py.Reference
A reference array pointing to all the elements written to the
HDF5 file. For those that couldn't be written, the respective
element points to the canonical empty.
Raises
------
TypeNotMatlabCompatibleError
If writing a type not compatible with MATLAB and
`options.action_for_matlab_incompatible` is set to ``'error'``.
See Also
--------
read_object_array
hdf5storage.Options.group_for_references
h5py.Reference | Below is the instruction that describes the task:
### Input:
Writes an array of objects recursively.
Writes the elements of the given object array recursively in the
HDF5 Group ``options.group_for_references`` and returns an
``h5py.Reference`` array to all the elements.
Parameters
----------
f : h5py.File
The HDF5 file handle that is open.
data : numpy.ndarray of objects
Numpy object array to write the elements of.
options : hdf5storage.core.Options
hdf5storage options object.
Returns
-------
obj_array : numpy.ndarray of h5py.Reference
A reference array pointing to all the elements written to the
HDF5 file. For those that couldn't be written, the respective
element points to the canonical empty.
Raises
------
TypeNotMatlabCompatibleError
If writing a type not compatible with MATLAB and
`options.action_for_matlab_incompatible` is set to ``'error'``.
See Also
--------
read_object_array
hdf5storage.Options.group_for_references
h5py.Reference
### Response:
def write_object_array(f, data, options):
""" Writes an array of objects recursively.
Writes the elements of the given object array recursively in the
HDF5 Group ``options.group_for_references`` and returns an
``h5py.Reference`` array to all the elements.
Parameters
----------
f : h5py.File
The HDF5 file handle that is open.
data : numpy.ndarray of objects
Numpy object array to write the elements of.
options : hdf5storage.core.Options
hdf5storage options object.
Returns
-------
obj_array : numpy.ndarray of h5py.Reference
A reference array pointing to all the elements written to the
HDF5 file. For those that couldn't be written, the respective
element points to the canonical empty.
Raises
------
TypeNotMatlabCompatibleError
If writing a type not compatible with MATLAB and
`options.action_for_matlab_incompatible` is set to ``'error'``.
See Also
--------
read_object_array
hdf5storage.Options.group_for_references
h5py.Reference
"""
# We need to grab the special reference dtype and make an empty
# array to store all the references in.
ref_dtype = h5py.special_dtype(ref=h5py.Reference)
data_refs = np.zeros(shape=data.shape, dtype='object')
# We need to make sure that the group to hold references is present,
# and create it if it isn't.
if options.group_for_references not in f:
f.create_group(options.group_for_references)
grp2 = f[options.group_for_references]
if not isinstance(grp2, h5py.Group):
del f[options.group_for_references]
f.create_group(options.group_for_references)
grp2 = f[options.group_for_references]
# The Dataset 'a' needs to be present as the canonical empty. It is
# just an np.uint32/64([0, 0]) with a MATLAB_class of
# 'canonical empty' and the 'MATLAB_empty' attribute set. If it
# isn't present or is incorrectly formatted, it is created
# truncating anything previously there.
try:
dset_a = grp2['a']
if dset_a.shape != (2,) \
or not dset_a.dtype.name.startswith('uint') \
or np.any(dset_a[...] != np.uint64([0, 0])) \
or get_attribute_string(dset_a, 'MATLAB_class') != \
'canonical empty' \
or get_attribute(dset_a, 'MATLAB_empty') != 1:
del grp2['a']
dset_a = grp2.create_dataset('a', data=np.uint64([0, 0]))
set_attribute_string(dset_a, 'MATLAB_class',
'canonical empty')
set_attribute(dset_a, 'MATLAB_empty',
np.uint8(1))
except:
dset_a = grp2.create_dataset('a', data=np.uint64([0, 0]))
set_attribute_string(dset_a, 'MATLAB_class',
'canonical empty')
set_attribute(dset_a, 'MATLAB_empty',
np.uint8(1))
# Go through all the elements of data and write them, grabbing their
# references and putting them in data_refs. They will be put in
# group_for_references, which is also what the H5PATH needs to be
# set to if we are doing MATLAB compatibility (otherwise, the
# attribute needs to be deleted). If an element can't be written
# (doing matlab compatibility, but it isn't compatible with matlab
# and action_for_matlab_incompatible option is True), the reference
# to the canonical empty will be used for the reference array to
# point to.
grp2name = grp2.name
for index, x in np.ndenumerate(data):
name_for_ref = next_unused_name_in_group(grp2, 16)
write_data(f, grp2, name_for_ref, x, None, options)
try:
dset = grp2[name_for_ref]
data_refs[index] = dset.ref
if options.matlab_compatible:
set_attribute_string(dset,
'H5PATH', grp2name)
else:
del_attribute(dset, 'H5PATH')
except:
data_refs[index] = dset_a.ref
# Now, the dtype needs to be changed to the reference type and the
# whole thing copied over to data_to_store.
return data_refs.astype(ref_dtype).copy() |
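A much smaller sketch of the same h5py machinery (reference dtype, one dataset per element, an array of dataset.ref values written as a reference dataset); the file name and group name are made up and none of the MATLAB attributes are set here.

import numpy as np
import h5py

with h5py.File("refs_demo.h5", "w") as f:
    grp = f.create_group("#refs#")                       # stand-in for group_for_references
    ref_dtype = h5py.special_dtype(ref=h5py.Reference)
    refs = np.zeros((3,), dtype='object')
    for i in range(3):
        dset = grp.create_dataset("item%d" % i, data=np.arange(i + 1))
        refs[i] = dset.ref                               # keep a reference to each element
    f.create_dataset("cell_like", data=refs.astype(ref_dtype))
    first = f[f["cell_like"][0]][...]                    # dereferencing recovers element 0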
def train(input_dir, batch_size, max_steps, output_dir, checkpoint=None, cloud=None):
"""Blocking version of train_async(). The only difference is that it blocks the caller
until the job finishes, and it does not have a return value.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
job = train_async(input_dir, batch_size, max_steps, output_dir, checkpoint, cloud)
job.wait()
print(job.state) | Blocking version of train_async(). The only difference is that it blocks the caller
until the job finishes, and it does not have a return value. | Below is the instruction that describes the task:
### Input:
Blocking version of train_async(). The only difference is that it blocks the caller
until the job finishes, and it does not have a return value.
### Response:
def train(input_dir, batch_size, max_steps, output_dir, checkpoint=None, cloud=None):
"""Blocking version of train_async(). The only difference is that it blocks the caller
until the job finishes, and it does not have a return value.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
job = train_async(input_dir, batch_size, max_steps, output_dir, checkpoint, cloud)
job.wait()
print(job.state) |
def validate_arg_values(ast, bo):
"""Recursively validate arg (NSArg and StrArg) values
Check that NSArgs are found in BELbio API and match appropriate entity_type.
Check that StrArgs match their value - either default namespace or regex string
Generate a WARNING if not.
Args:
bo: bel object
Returns:
bel object
"""
if not bo.api_url:
log.info("No API endpoint defined")
return bo
log.debug(f"AST: {ast}")
# Test NSArg terms
if isinstance(ast, NSArg):
term_id = "{}:{}".format(ast.namespace, ast.value)
value_types = ast.value_types
log.debug(f"Value types: {value_types} AST value: {ast.value}")
# Default namespaces are defined in the bel_specification file
if ast.namespace == "DEFAULT": # may use the DEFAULT namespace or not
for value_type in value_types:
default_namespace = [
ns["name"] for ns in bo.spec["namespaces"][value_type]["info"]
] + [
ns["abbreviation"]
for ns in bo.spec["namespaces"][value_type]["info"]
]
if ast.value in default_namespace:
log.debug("Default namespace valid term: {}".format(term_id))
break
else: # if for loop doesn't hit the break, run this else
log.debug("Default namespace invalid term: {}".format(term_id))
bo.validation_messages.append(
("WARNING", f"Default Term: {term_id} not found")
)
# Process normal, non-default-namespace terms
else:
request_url = bo.api_url + "/terms/{}".format(
url_path_param_quoting(term_id)
)
log.info(f"Validate Arg Values url {request_url}")
r = get_url(request_url)
if r and r.status_code == 200:
result = r.json()
# function signature term value_types doesn't match up with API term entity_types
log.debug(
f'AST.value_types {ast.value_types} Entity types {result.get("entity_types", [])}'
)
# Check that entity types match
if (
len(
set(ast.value_types).intersection(
result.get("entity_types", [])
)
)
== 0
):
log.debug(
"Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}".format(
term_id, ast.value_types, result.get("entity_types", [])
)
)
bo.validation_messages.append(
(
"WARNING",
"Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}".format(
term_id, ast.value_types, result.get("entity_types", [])
),
)
)
if term_id in result.get("obsolete_ids", []):
bo.validation_messages.append(
(
"WARNING",
f'Obsolete term: {term_id} Current term: {result["id"]}',
)
)
elif r.status_code == 404:
bo.validation_messages.append(
("WARNING", f"Term: {term_id} not found in namespace")
)
else:
log.error(f"Status {r.status_code} - Bad URL: {request_url}")
# Process StrArgs
if isinstance(ast, StrArg):
log.debug(f" Check String Arg: {ast.value} {ast.value_types}")
for value_type in ast.value_types:
# Is this a regex to match against
if re.match("/", value_type):
value_type = re.sub("^/", "", value_type)
value_type = re.sub("/$", "", value_type)
match = re.match(value_type, ast.value)
if match:
break
if value_type in bo.spec["namespaces"]:
default_namespace = [
ns["name"] for ns in bo.spec["namespaces"][value_type]["info"]
] + [
ns["abbreviation"]
for ns in bo.spec["namespaces"][value_type]["info"]
]
if ast.value in default_namespace:
break
else: # If for loop doesn't hit the break, no matches found, therefore for StrArg value is bad
bo.validation_messages.append(
(
"WARNING",
f"String value {ast.value} does not match default namespace value or regex pattern: {ast.value_types}",
)
)
# Recursively process every NSArg by processing BELAst and Functions
if hasattr(ast, "args"):
for arg in ast.args:
validate_arg_values(arg, bo)
return bo | Recursively validate arg (NSArg and StrArg) values
Check that NSArgs are found in BELbio API and match appropriate entity_type.
Check that StrArgs match their value - either default namespace or regex string
Generate a WARNING if not.
Args:
bo: bel object
Returns:
bel object | Below is the instruction that describes the task:
### Input:
Recursively validate arg (NSArg and StrArg) values
Check that NSArgs are found in BELbio API and match appropriate entity_type.
Check that StrArgs match their value - either default namespace or regex string
Generate a WARNING if not.
Args:
bo: bel object
Returns:
bel object
### Response:
def validate_arg_values(ast, bo):
"""Recursively validate arg (NSArg and StrArg) values
Check that NSArgs are found in BELbio API and match appropriate entity_type.
Check that StrArgs match their value - either default namespace or regex string
Generate a WARNING if not.
Args:
bo: bel object
Returns:
bel object
"""
if not bo.api_url:
log.info("No API endpoint defined")
return bo
log.debug(f"AST: {ast}")
# Test NSArg terms
if isinstance(ast, NSArg):
term_id = "{}:{}".format(ast.namespace, ast.value)
value_types = ast.value_types
log.debug(f"Value types: {value_types} AST value: {ast.value}")
# Default namespaces are defined in the bel_specification file
if ast.namespace == "DEFAULT": # may use the DEFAULT namespace or not
for value_type in value_types:
default_namespace = [
ns["name"] for ns in bo.spec["namespaces"][value_type]["info"]
] + [
ns["abbreviation"]
for ns in bo.spec["namespaces"][value_type]["info"]
]
if ast.value in default_namespace:
log.debug("Default namespace valid term: {}".format(term_id))
break
else: # if for loop doesn't hit the break, run this else
log.debug("Default namespace invalid term: {}".format(term_id))
bo.validation_messages.append(
("WARNING", f"Default Term: {term_id} not found")
)
# Process normal, non-default-namespace terms
else:
request_url = bo.api_url + "/terms/{}".format(
url_path_param_quoting(term_id)
)
log.info(f"Validate Arg Values url {request_url}")
r = get_url(request_url)
if r and r.status_code == 200:
result = r.json()
# function signature term value_types doesn't match up with API term entity_types
log.debug(
f'AST.value_types {ast.value_types} Entity types {result.get("entity_types", [])}'
)
# Check that entity types match
if (
len(
set(ast.value_types).intersection(
result.get("entity_types", [])
)
)
== 0
):
log.debug(
"Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}".format(
term_id, ast.value_types, result.get("entity_types", [])
)
)
bo.validation_messages.append(
(
"WARNING",
"Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}".format(
term_id, ast.value_types, result.get("entity_types", [])
),
)
)
if term_id in result.get("obsolete_ids", []):
bo.validation_messages.append(
(
"WARNING",
f'Obsolete term: {term_id} Current term: {result["id"]}',
)
)
elif r.status_code == 404:
bo.validation_messages.append(
("WARNING", f"Term: {term_id} not found in namespace")
)
else:
log.error(f"Status {r.status_code} - Bad URL: {request_url}")
# Process StrArgs
if isinstance(ast, StrArg):
log.debug(f" Check String Arg: {ast.value} {ast.value_types}")
for value_type in ast.value_types:
# Is this a regex to match against
if re.match("/", value_type):
value_type = re.sub("^/", "", value_type)
value_type = re.sub("/$", "", value_type)
match = re.match(value_type, ast.value)
if match:
break
if value_type in bo.spec["namespaces"]:
default_namespace = [
ns["name"] for ns in bo.spec["namespaces"][value_type]["info"]
] + [
ns["abbreviation"]
for ns in bo.spec["namespaces"][value_type]["info"]
]
if ast.value in default_namespace:
break
else: # If for loop doesn't hit the break, no matches found, therefore for StrArg value is bad
bo.validation_messages.append(
(
"WARNING",
f"String value {ast.value} does not match default namespace value or regex pattern: {ast.value_types}",
)
)
# Recursively process every NSArg by processing BELAst and Functions
if hasattr(ast, "args"):
for arg in ast.args:
validate_arg_values(arg, bo)
return bo |
def get_num_sig(self, alpha=0.05):
"""Print the number of significant results using various metrics."""
# Get the number of significant GO terms
ctr = cx.Counter()
flds = set(['FDR', 'Bonferroni', 'Benjamini', 'PValue'])
for ntd in self.nts:
for fld in flds:
if getattr(ntd, fld) < alpha:
ctr[fld] += 1
return ctr | Print the number of significant results using various metrics. | Below is the the instruction that describes the task:
### Input:
Print the number of significant results using various metrics.
### Response:
def get_num_sig(self, alpha=0.05):
"""Print the number of significant results using various metrics."""
# Get the number of significant GO terms
ctr = cx.Counter()
flds = set(['FDR', 'Bonferroni', 'Benjamini', 'PValue'])
for ntd in self.nts:
for fld in flds:
if getattr(ntd, fld) < alpha:
ctr[fld] += 1
return ctr |
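A rough sketch of what the counter above ends up holding, using namedtuple stand-ins for the enrichment-result records stored in self.nts.

import collections as cx

Rec = cx.namedtuple("Rec", "FDR Bonferroni Benjamini PValue")
nts = [Rec(0.01, 0.2, 0.04, 0.001), Rec(0.2, 0.3, 0.2, 0.04)]
ctr = cx.Counter()
for ntd in nts:
    for fld in ("FDR", "Bonferroni", "Benjamini", "PValue"):
        if getattr(ntd, fld) < 0.05:
            ctr[fld] += 1
print(ctr)   # Counter({'PValue': 2, 'FDR': 1, 'Benjamini': 1})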
def limit(self, limit):
"""
Set absolute limit on number of images to return, or set to None to return
as many results as needed; default 50 posts.
"""
params = join_params(self.parameters, {"limit": limit})
return self.__class__(**params) | Set absolute limit on number of images to return, or set to None to return
as many results as needed; default 50 posts. | Below is the instruction that describes the task:
### Input:
Set absolute limit on number of images to return, or set to None to return
as many results as needed; default 50 posts.
### Response:
def limit(self, limit):
"""
Set absolute limit on number of images to return, or set to None to return
as many results as needed; default 50 posts.
"""
params = join_params(self.parameters, {"limit": limit})
return self.__class__(**params) |
def _gettype(self):
'''
Return current type of this struct
:returns: a typedef object (e.g. nstruct)
'''
current = self
lastname = getattr(current._parser, 'typedef', None)
while hasattr(current, '_sub'):
current = current._sub
tn = getattr(current._parser, 'typedef', None)
if tn is not None:
lastname = tn
return lastname | Return current type of this struct
:returns: a typedef object (e.g. nstruct) | Below is the instruction that describes the task:
### Input:
Return current type of this struct
:returns: a typedef object (e.g. nstruct)
### Response:
def _gettype(self):
'''
Return current type of this struct
:returns: a typedef object (e.g. nstruct)
'''
current = self
lastname = getattr(current._parser, 'typedef', None)
while hasattr(current, '_sub'):
current = current._sub
tn = getattr(current._parser, 'typedef', None)
if tn is not None:
lastname = tn
return lastname |
def wo_resp(self, resp):
"""
can override for other style
"""
if self._data is not None:
resp['res'] = self.to_str(self._data)
return self.wo_json(resp) | can override for other style | Below is the the instruction that describes the task:
### Input:
can override for other style
### Response:
def wo_resp(self, resp):
"""
can override for other style
"""
if self._data is not None:
resp['res'] = self.to_str(self._data)
return self.wo_json(resp) |
def set_attrs(self, username, attrs):
""" set user attributes"""
ldap_client = self._bind()
tmp = self._get_user(self._byte_p2(username), ALL_ATTRS)
if tmp is None:
raise UserDoesntExist(username, self.backend_name)
dn = self._byte_p2(tmp[0])
old_attrs = tmp[1]
for attr in attrs:
bcontent = self._byte_p2(attrs[attr])
battr = self._byte_p2(attr)
new = {battr: self._modlist(self._byte_p3(bcontent))}
# if attr is dn entry, use rename
if attr.lower() == self.dn_user_attr.lower():
ldap_client.rename_s(
dn,
ldap.dn.dn2str([[(battr, bcontent, 1)]])
)
dn = ldap.dn.dn2str(
[[(battr, bcontent, 1)]] + ldap.dn.str2dn(dn)[1:]
)
else:
# if attr is already set, replace the value
# (see dict old passed to modifyModlist)
if attr in old_attrs:
if type(old_attrs[attr]) is list:
tmp = []
for value in old_attrs[attr]:
tmp.append(self._byte_p2(value))
bold_value = tmp
else:
bold_value = self._modlist(
self._byte_p3(old_attrs[attr])
)
old = {battr: bold_value}
# attribute is not set, just add it
else:
old = {}
ldif = modlist.modifyModlist(old, new)
if ldif:
try:
ldap_client.modify_s(dn, ldif)
except Exception as e:
ldap_client.unbind_s()
self._exception_handler(e)
ldap_client.unbind_s() | set user attributes | Below is the the instruction that describes the task:
### Input:
set user attributes
### Response:
def set_attrs(self, username, attrs):
""" set user attributes"""
ldap_client = self._bind()
tmp = self._get_user(self._byte_p2(username), ALL_ATTRS)
if tmp is None:
raise UserDoesntExist(username, self.backend_name)
dn = self._byte_p2(tmp[0])
old_attrs = tmp[1]
for attr in attrs:
bcontent = self._byte_p2(attrs[attr])
battr = self._byte_p2(attr)
new = {battr: self._modlist(self._byte_p3(bcontent))}
# if attr is dn entry, use rename
if attr.lower() == self.dn_user_attr.lower():
ldap_client.rename_s(
dn,
ldap.dn.dn2str([[(battr, bcontent, 1)]])
)
dn = ldap.dn.dn2str(
[[(battr, bcontent, 1)]] + ldap.dn.str2dn(dn)[1:]
)
else:
# if attr is already set, replace the value
# (see dict old passed to modifyModlist)
if attr in old_attrs:
if type(old_attrs[attr]) is list:
tmp = []
for value in old_attrs[attr]:
tmp.append(self._byte_p2(value))
bold_value = tmp
else:
bold_value = self._modlist(
self._byte_p3(old_attrs[attr])
)
old = {battr: bold_value}
# attribute is not set, just add it
else:
old = {}
ldif = modlist.modifyModlist(old, new)
if ldif:
try:
ldap_client.modify_s(dn, ldif)
except Exception as e:
ldap_client.unbind_s()
self._exception_handler(e)
ldap_client.unbind_s() |
def to_graph_tool(self):
'''Converts this Graph object to a graph_tool-compatible object.
Requires the graph_tool library.
Note that the internal ordering of graph_tool seems to be column-major.'''
# Import here to avoid ImportErrors when graph_tool isn't available.
import graph_tool
gt = graph_tool.Graph(directed=self.is_directed())
gt.add_edge_list(self.pairs())
if self.is_weighted():
weights = gt.new_edge_property('double')
for e,w in zip(gt.edges(), self.edge_weights()):
weights[e] = w
gt.edge_properties['weight'] = weights
return gt | Converts this Graph object to a graph_tool-compatible object.
Requires the graph_tool library.
Note that the internal ordering of graph_tool seems to be column-major. | Below is the instruction that describes the task:
### Input:
Converts this Graph object to a graph_tool-compatible object.
Requires the graph_tool library.
Note that the internal ordering of graph_tool seems to be column-major.
### Response:
def to_graph_tool(self):
'''Converts this Graph object to a graph_tool-compatible object.
Requires the graph_tool library.
Note that the internal ordering of graph_tool seems to be column-major.'''
# Import here to avoid ImportErrors when graph_tool isn't available.
import graph_tool
gt = graph_tool.Graph(directed=self.is_directed())
gt.add_edge_list(self.pairs())
if self.is_weighted():
weights = gt.new_edge_property('double')
for e,w in zip(gt.edges(), self.edge_weights()):
weights[e] = w
gt.edge_properties['weight'] = weights
return gt |
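A hedged usage sketch; `g` is assumed to be an instance of this library's Graph class and graph_tool must be installed.

gt_graph = g.to_graph_tool()
print(gt_graph.num_vertices(), gt_graph.num_edges())
if 'weight' in gt_graph.edge_properties:
    print(gt_graph.edge_properties['weight'].a[:5])   # first few edge weights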
def rfdist_task(newick_string_a, newick_string_b, normalise, min_overlap=4, overlap_fail_value=0):
"""
Distributed version of tree_distance.rfdist
Parameters: two valid newick strings and a boolean
"""
tree_a = Tree(newick_string_a)
tree_b = Tree(newick_string_b)
return treedist.rfdist(tree_a, tree_b, normalise, min_overlap, overlap_fail_value) | Distributed version of tree_distance.rfdist
Parameters: two valid newick strings and a boolean | Below is the instruction that describes the task:
### Input:
Distributed version of tree_distance.rfdist
Parameters: two valid newick strings and a boolean
### Response:
def rfdist_task(newick_string_a, newick_string_b, normalise, min_overlap=4, overlap_fail_value=0):
"""
Distributed version of tree_distance.rfdist
Parameters: two valid newick strings and a boolean
"""
tree_a = Tree(newick_string_a)
tree_b = Tree(newick_string_b)
return treedist.rfdist(tree_a, tree_b, normalise, min_overlap, overlap_fail_value) |
def sanitize(s, strict=True):
"""
Sanitize a string.
Spaces are converted to underscore; if strict=True they are then removed.
Parameters
----------
s : str
String to sanitize
strict : bool
If True, only alphanumeric characters are allowed. If False, a limited
set of additional characters (-._) will be allowed.
"""
allowed = ''.join(
[
'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'abcdefghijklmnopqrstuvwxyz',
'0123456789',
]
)
if not strict:
allowed += '-_.'
s = str(s).replace(' ', '_')
return ''.join([i for i in s if i in allowed]) | Sanitize a string.
Spaces are converted to underscore; if strict=True they are then removed.
Parameters
----------
s : str
String to sanitize
strict : bool
If True, only alphanumeric characters are allowed. If False, a limited
set of additional characters (-._) will be allowed. | Below is the instruction that describes the task:
### Input:
Sanitize a string.
Spaces are converted to underscore; if strict=True they are then removed.
Parameters
----------
s : str
String to sanitize
strict : bool
If True, only alphanumeric characters are allowed. If False, a limited
set of additional characters (-._) will be allowed.
### Response:
def sanitize(s, strict=True):
"""
Sanitize a string.
Spaces are converted to underscore; if strict=True they are then removed.
Parameters
----------
s : str
String to sanitize
strict : bool
If True, only alphanumeric characters are allowed. If False, a limited
set of additional characters (-._) will be allowed.
"""
allowed = ''.join(
[
'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'abcdefghijklmnopqrstuvwxyz',
'0123456789',
]
)
if not strict:
allowed += '-_.'
s = str(s).replace(' ', '_')
return ''.join([i for i in s if i in allowed]) |
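A usage sketch; the outputs in the comments are what a straight reading of the code above should produce (note that strict=True also drops the underscores introduced for spaces, since only alphanumerics stay allowed).

print(sanitize("My file (v2).txt"))                 # 'Myfilev2txt'
print(sanitize("My file (v2).txt", strict=False))   # 'My_file_v2.txt'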
def permutation_entropy(x, n, tau):
"""Compute Permutation Entropy of a given time series x, specified by
permutation order n and embedding lag tau.
Parameters
----------
x
list
a time series
n
integer
Permutation order
tau
integer
Embedding lag
Returns
----------
PE
float
permutation entropy
Notes
----------
Suppose the given time series is X =[x(1),x(2),x(3),...,x(N)].
We first build embedding matrix Em, of dimension(n*N-n+1),
such that the ith row of Em is x(i),x(i+1),..x(i+n-1). Hence
the embedding lag and the embedding dimension are 1 and n
respectively. We build this matrix from a given time series,
X, by calling pyEEg function embed_seq(x,1,n).
We then transform each row of the embedding matrix into
a new sequence, comprising a set of integers in range of 0,..,n-1.
The order in which the integers are placed within a row is the
same as those of the original elements:0 is placed where the smallest
element of the row was and n-1 replaces the largest element of the row.
To calculate the Permutation entropy, we calculate the entropy of PeSeq.
In doing so, we count the number of occurrences of each permutation
in PeSeq and write it in a sequence, RankMat. We then use this sequence to
calculate entropy by using Shannon's entropy formula.
Permutation entropy is usually calculated with n in range of 3 and 7.
References
----------
Bandt, Christoph, and Bernd Pompe. "Permutation entropy: a natural
complexity measure for time series." Physical Review Letters 88.17
(2002): 174102.
Examples
----------
>>> import pyeeg
>>> x = [1,2,4,5,12,3,4,5]
>>> pyeeg.permutation_entropy(x,5,1)
2.0
"""
PeSeq = []
Em = embed_seq(x, tau, n)
for i in range(0, len(Em)):
r = []
z = []
for j in range(0, len(Em[i])):
z.append(Em[i][j])
for j in range(0, len(Em[i])):
z.sort()
r.append(z.index(Em[i][j]))
z[z.index(Em[i][j])] = -1
PeSeq.append(r)
RankMat = []
while len(PeSeq) > 0:
RankMat.append(PeSeq.count(PeSeq[0]))
x = PeSeq[0]
for j in range(0, PeSeq.count(PeSeq[0])):
PeSeq.pop(PeSeq.index(x))
RankMat = numpy.array(RankMat)
RankMat = numpy.true_divide(RankMat, RankMat.sum())
EntropyMat = numpy.multiply(numpy.log2(RankMat), RankMat)
PE = -1 * EntropyMat.sum()
return PE | Compute Permutation Entropy of a given time series x, specified by
permutation order n and embedding lag tau.
Parameters
----------
x
list
a time series
n
integer
Permutation order
tau
integer
Embedding lag
Returns
----------
PE
float
permutation entropy
Notes
----------
Suppose the given time series is X =[x(1),x(2),x(3),...,x(N)].
We first build embedding matrix Em, of dimension(n*N-n+1),
such that the ith row of Em is x(i),x(i+1),..x(i+n-1). Hence
the embedding lag and the embedding dimension are 1 and n
respectively. We build this matrix from a given time series,
X, by calling pyEEg function embed_seq(x,1,n).
We then transform each row of the embedding matrix into
a new sequence, comprising a set of integers in range of 0,..,n-1.
The order in which the integers are placed within a row is the
same as those of the original elements:0 is placed where the smallest
element of the row was and n-1 replaces the largest element of the row.
To calculate the Permutation entropy, we calculate the entropy of PeSeq.
In doing so, we count the number of occurrences of each permutation
in PeSeq and write it in a sequence, RankMat. We then use this sequence to
calculate entropy by using Shannon's entropy formula.
Permutation entropy is usually calculated with n in range of 3 and 7.
References
----------
Bandt, Christoph, and Bernd Pompe. "Permutation entropy: a natural
complexity measure for time series." Physical Review Letters 88.17
(2002): 174102.
Examples
----------
>>> import pyeeg
>>> x = [1,2,4,5,12,3,4,5]
>>> pyeeg.permutation_entropy(x,5,1)
2.0 | Below is the instruction that describes the task:
### Input:
Compute Permutation Entropy of a given time series x, specified by
permutation order n and embedding lag tau.
Parameters
----------
x
list
a time series
n
integer
Permutation order
tau
integer
Embedding lag
Returns
----------
PE
float
permutation entropy
Notes
----------
Suppose the given time series is X =[x(1),x(2),x(3),...,x(N)].
We first build embedding matrix Em, of dimension(n*N-n+1),
such that the ith row of Em is x(i),x(i+1),..x(i+n-1). Hence
the embedding lag and the embedding dimension are 1 and n
respectively. We build this matrix from a given time series,
X, by calling pyEEg function embed_seq(x,1,n).
We then transform each row of the embedding matrix into
a new sequence, comprising a set of integers in range of 0,..,n-1.
The order in which the integers are placed within a row is the
same as those of the original elements:0 is placed where the smallest
element of the row was and n-1 replaces the largest element of the row.
To calculate the Permutation entropy, we calculate the entropy of PeSeq.
In doing so, we count the number of occurrences of each permutation
in PeSeq and write it in a sequence, RankMat. We then use this sequence to
calculate entropy by using Shannon's entropy formula.
Permutation entropy is usually calculated with n in range of 3 and 7.
References
----------
Bandt, Christoph, and Bernd Pompe. "Permutation entropy: a natural
complexity measure for time series." Physical Review Letters 88.17
(2002): 174102.
Examples
----------
>>> import pyeeg
>>> x = [1,2,4,5,12,3,4,5]
>>> pyeeg.permutation_entropy(x,5,1)
2.0
### Response:
def permutation_entropy(x, n, tau):
"""Compute Permutation Entropy of a given time series x, specified by
permutation order n and embedding lag tau.
Parameters
----------
x
list
a time series
n
integer
Permutation order
tau
integer
Embedding lag
Returns
----------
PE
float
permutation entropy
Notes
----------
Suppose the given time series is X =[x(1),x(2),x(3),...,x(N)].
We first build embedding matrix Em, of dimension(n*N-n+1),
such that the ith row of Em is x(i),x(i+1),..x(i+n-1). Hence
the embedding lag and the embedding dimension are 1 and n
respectively. We build this matrix from a given time series,
X, by calling pyEEg function embed_seq(x,1,n).
We then transform each row of the embedding matrix into
a new sequence, comprising a set of integers in range of 0,..,n-1.
The order in which the integers are placed within a row is the
same as those of the original elements:0 is placed where the smallest
element of the row was and n-1 replaces the largest element of the row.
To calculate the Permutation entropy, we calculate the entropy of PeSeq.
In doing so, we count the number of occurrences of each permutation
in PeSeq and write it in a sequence, RankMat. We then use this sequence to
calculate entropy by using Shannon's entropy formula.
Permutation entropy is usually calculated with n in range of 3 and 7.
References
----------
Bandt, Christoph, and Bernd Pompe. "Permutation entropy: a natural
complexity measure for time series." Physical Review Letters 88.17
(2002): 174102.
Examples
----------
>>> import pyeeg
>>> x = [1,2,4,5,12,3,4,5]
>>> pyeeg.permutation_entropy(x,5,1)
2.0
"""
PeSeq = []
Em = embed_seq(x, tau, n)
for i in range(0, len(Em)):
r = []
z = []
for j in range(0, len(Em[i])):
z.append(Em[i][j])
for j in range(0, len(Em[i])):
z.sort()
r.append(z.index(Em[i][j]))
z[z.index(Em[i][j])] = -1
PeSeq.append(r)
RankMat = []
while len(PeSeq) > 0:
RankMat.append(PeSeq.count(PeSeq[0]))
x = PeSeq[0]
for j in range(0, PeSeq.count(PeSeq[0])):
PeSeq.pop(PeSeq.index(x))
RankMat = numpy.array(RankMat)
RankMat = numpy.true_divide(RankMat, RankMat.sum())
EntropyMat = numpy.multiply(numpy.log2(RankMat), RankMat)
PE = -1 * EntropyMat.sum()
return PE |
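For a quick sanity check, the same value can be reproduced with a short NumPy-only sketch that is independent of pyeeg (the helper name ordinal_entropy and the argsort-based pattern encoding are ours, not pyeeg's; ties are broken by argsort order):

import numpy as np
from collections import Counter

def ordinal_entropy(x, n, tau):
    # Count ordinal (argsort) patterns over sliding windows of length n with lag tau,
    # then take the Shannon entropy of the pattern distribution in bits.
    x = np.asarray(x)
    patterns = [tuple(np.argsort(x[i:i + (n - 1) * tau + 1:tau]))
                for i in range(len(x) - (n - 1) * tau)]
    counts = np.array(list(Counter(patterns).values()), dtype=float)
    p = counts / counts.sum()
    return -np.sum(p * np.log2(p))

print(ordinal_entropy([1, 2, 4, 5, 12, 3, 4, 5], 5, 1))  # 2.0, matching the docstring example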
def load_sampleset(self, f, name):
'''Read the sampleset from f using the HDF5 format. Name is usually in {train, test}.'''
self.encoder_x = np.array(f[name + '_encoder_x'])
self.decoder_x = np.array(f[name + '_decoder_x'])
self.decoder_y = np.array(f[name + '_decoder_y']) | Read the sampleset from f using the HDF5 format. Name is usually in {train, test}. | Below is the instruction that describes the task:
### Input:
Read the sampleset from f using the HDF5 format. Name is usually in {train, test}.
### Response:
def load_sampleset(self, f, name):
'''Read the sampleset from f using the HDF5 format. Name is usually in {train, test}.'''
self.encoder_x = np.array(f[name + '_encoder_x'])
self.decoder_x = np.array(f[name + '_decoder_x'])
self.decoder_y = np.array(f[name + '_decoder_y']) |
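A minimal usage sketch, assuming the arrays were previously written to an HDF5 file under matching '<name>_encoder_x', '<name>_decoder_x' and '<name>_decoder_y' keys; the file name and the sampler instance are hypothetical:

import h5py

with h5py.File('samples.h5', 'r') as f:   # hypothetical file produced by the matching save routine
    sampler.load_sampleset(f, 'train')    # fills sampler.encoder_x, .decoder_x and .decoder_y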
def setParentAnalysisRequest(self, value):
"""Sets a parent analysis request, making the current a partition
"""
self.Schema().getField("ParentAnalysisRequest").set(self, value)
if not value:
noLongerProvides(self, IAnalysisRequestPartition)
else:
alsoProvides(self, IAnalysisRequestPartition) | Sets a parent analysis request, making the current a partition | Below is the instruction that describes the task:
### Input:
Sets a parent analysis request, making the current a partition
### Response:
def setParentAnalysisRequest(self, value):
"""Sets a parent analysis request, making the current a partition
"""
self.Schema().getField("ParentAnalysisRequest").set(self, value)
if not value:
noLongerProvides(self, IAnalysisRequestPartition)
else:
alsoProvides(self, IAnalysisRequestPartition) |
def fill(self, *args):
"""
Apply a solid fill to your chart
args are of the form <fill type>,<fill style>,...
fill type must be one of c,bg,a
fill style must be one of s,lg,ls
the rest of the args refer to the particular style
APIPARAM: chf
"""
a,b = args[:2]
assert a in ('c','bg','a'), 'Fill type must be bg/c/a not %s'%a
assert b in ('s','lg','ls'), 'Fill style must be s/lg/ls not %s'%b
if len(args) == 3:
args = color_args(args, 2)
else:
args = color_args(args, 3,5)
self.fills.append(','.join(map(str,args)))
return self | Apply a solid fill to your chart
args are of the form <fill type>,<fill style>,...
fill type must be one of c,bg,a
fill style must be one of s,lg,ls
the rest of the args refer to the particular style
APIPARAM: chf | Below is the instruction that describes the task:
### Input:
Apply a solid fill to your chart
args are of the form <fill type>,<fill style>,...
fill type must be one of c,bg,a
fill style must be one of s,lg,ls
the rest of the args refer to the particular style
APIPARAM: chf
### Response:
def fill(self, *args):
"""
Apply a solid fill to your chart
args are of the form <fill type>,<fill style>,...
fill type must be one of c,bg,a
fill style must be one of s,lg,ls
the rest of the args refer to the particular style
APIPARAM: chf
"""
a,b = args[:2]
assert a in ('c','bg','a'), 'Fill type must be bg/c/a not %s'%a
assert b in ('s','lg','ls'), 'Fill style must be s/lg/ls not %s'%b
if len(args) == 3:
args = color_args(args, 2)
else:
args = color_args(args, 3,5)
self.fills.append(','.join(map(str,args)))
return self |
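Judging only from the assertions above, a solid fill takes three arguments while a gradient-style fill passes its colours at positions 3 and 5; chart stands for an instance of the enclosing chart class and the colour values are illustrative. Because the method returns self, calls can be chained:

chart.fill('bg', 's', 'EFEFEF')                        # solid light-grey page background
chart.fill('c', 'lg', 45, 'FFFFFF', 0, '76A4FB', 1)    # 45-degree linear gradient over the chart area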
def get_img_attrs(self, style=None, **kwargs):
""" Get an attribute list (src, srcset, style, et al) for the image.
style -- an optional list of CSS style fragments
Returns: a dict of attributes e.g. {'src':'foo.jpg','srcset':'foo.jpg 1x, bar.jpg 2x'}
"""
add = {}
if 'prefix' in kwargs:
attr_prefixes = kwargs.get('prefix')
if isinstance(kwargs['prefix'], str):
attr_prefixes = [attr_prefixes]
for prefix in attr_prefixes:
for k, val in kwargs.items():
if k.startswith(prefix):
add[k[len(prefix):]] = val
return self._get_img_attrs(style, {**kwargs, **add}) | Get an attribute list (src, srcset, style, et al) for the image.
style -- an optional list of CSS style fragments
Returns: a dict of attributes e.g. {'src':'foo.jpg','srcset':'foo.jpg 1x, bar.jpg 2x'} | Below is the instruction that describes the task:
### Input:
Get an attribute list (src, srcset, style, et al) for the image.
style -- an optional list of CSS style fragments
Returns: a dict of attributes e.g. {'src':'foo.jpg','srcset':'foo.jpg 1x, bar.jpg 2x'}
### Response:
def get_img_attrs(self, style=None, **kwargs):
""" Get an attribute list (src, srcset, style, et al) for the image.
style -- an optional list of CSS style fragments
Returns: a dict of attributes e.g. {'src':'foo.jpg','srcset':'foo.jpg 1x, bar.jpg 2x'}
"""
add = {}
if 'prefix' in kwargs:
attr_prefixes = kwargs.get('prefix')
if isinstance(kwargs['prefix'], str):
attr_prefixes = [attr_prefixes]
for prefix in attr_prefixes:
for k, val in kwargs.items():
if k.startswith(prefix):
add[k[len(prefix):]] = val
return self._get_img_attrs(style, {**kwargs, **add}) |
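The prefix handling above simply copies every keyword whose name starts with one of the given prefixes onto a prefix-stripped key before delegating to _get_img_attrs; a standalone illustration of that merge step (names invented):

kwargs = {'prefix': 'img_', 'img_width': 320, 'img_class': 'hero', 'title': 'Banner'}
add = {k[len('img_'):]: v for k, v in kwargs.items() if k.startswith('img_')}
print(add)   # {'width': 320, 'class': 'hero'}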
def get_by_index(self, i):
"""Look up a gene set by its index.
Parameters
----------
i: int
The index of the gene set.
Returns
-------
GeneSet
The gene set.
Raises
------
ValueError
If the given index is out of bounds.
"""
if i >= self.n:
raise ValueError('Index %d out of bounds ' % i +
'for database with %d gene sets.' % self.n)
return self._gene_sets[self._gene_set_ids[i]] | Look up a gene set by its index.
Parameters
----------
i: int
The index of the gene set.
Returns
-------
GeneSet
The gene set.
Raises
------
ValueError
If the given index is out of bounds. | Below is the instruction that describes the task:
### Input:
Look up a gene set by its index.
Parameters
----------
i: int
The index of the gene set.
Returns
-------
GeneSet
The gene set.
Raises
------
ValueError
If the given index is out of bounds.
### Response:
def get_by_index(self, i):
"""Look up a gene set by its index.
Parameters
----------
i: int
The index of the gene set.
Returns
-------
GeneSet
The gene set.
Raises
------
ValueError
If the given index is out of bounds.
"""
if i >= self.n:
raise ValueError('Index %d out of bounds ' % i +
'for database with %d gene sets.' % self.n)
return self._gene_sets[self._gene_set_ids[i]] |
def getLabel(self):
"""Returns symbolic path to this MIB variable.
Meaning a sequence of symbolic identifications for each of parent
MIB objects in MIB tree.
Returns
-------
tuple
sequence of names of nodes in a MIB tree from the top of the tree
towards this MIB variable.
Raises
------
SmiError
If MIB variable conversion has not been performed.
Notes
-----
Returned sequence may not contain full path to this MIB variable
if some symbols are not known at the moment of MIB look up.
Examples
--------
>>> objectIdentity = ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)
>>> objectIdentity.resolveWithMib(mibViewController)
>>> objectIdentity.getLabel()
('iso', 'org', 'dod', 'internet', 'mgmt', 'mib-2', 'system', 'sysDescr')
>>>
"""
if self._state & self.ST_CLEAN:
return self._label
else:
raise SmiError(
'%s object not fully initialized' % self.__class__.__name__) | Returns symbolic path to this MIB variable.
Meaning a sequence of symbolic identifications for each of parent
MIB objects in MIB tree.
Returns
-------
tuple
sequence of names of nodes in a MIB tree from the top of the tree
towards this MIB variable.
Raises
------
SmiError
If MIB variable conversion has not been performed.
Notes
-----
Returned sequence may not contain full path to this MIB variable
if some symbols are not known at the moment of MIB look up.
Examples
--------
>>> objectIdentity = ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)
>>> objectIdentity.resolveWithMib(mibViewController)
>>> objectIdentity.getLabel()
('iso', 'org', 'dod', 'internet', 'mgmt', 'mib-2', 'system', 'sysDescr')
>>> | Below is the instruction that describes the task:
### Input:
Returns symbolic path to this MIB variable.
Meaning a sequence of symbolic identifications for each of parent
MIB objects in MIB tree.
Returns
-------
tuple
sequence of names of nodes in a MIB tree from the top of the tree
towards this MIB variable.
Raises
------
SmiError
If MIB variable conversion has not been performed.
Notes
-----
Returned sequence may not contain full path to this MIB variable
if some symbols are not known at the moment of MIB look up.
Examples
--------
>>> objectIdentity = ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)
>>> objectIdentity.resolveWithMib(mibViewController)
>>> objectIdentity.getLabel()
('iso', 'org', 'dod', 'internet', 'mgmt', 'mib-2', 'system', 'sysDescr')
>>>
### Response:
def getLabel(self):
"""Returns symbolic path to this MIB variable.
Meaning a sequence of symbolic identifications for each of parent
MIB objects in MIB tree.
Returns
-------
tuple
sequence of names of nodes in a MIB tree from the top of the tree
towards this MIB variable.
Raises
------
SmiError
If MIB variable conversion has not been performed.
Notes
-----
Returned sequence may not contain full path to this MIB variable
if some symbols are not known at the moment of MIB look up.
Examples
--------
>>> objectIdentity = ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)
>>> objectIdentity.resolveWithMib(mibViewController)
>>> objectIdentity.getLabel()
('iso', 'org', 'dod', 'internet', 'mgmt', 'mib-2', 'system', 'sysDescr')
>>>
"""
if self._state & self.ST_CLEAN:
return self._label
else:
raise SmiError(
'%s object not fully initialized' % self.__class__.__name__) |
def open(self):
"""initialize visit variables and statistics
"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0) | initialize visit variables and statistics | Below is the instruction that describes the task:
### Input:
initialize visit variables and statistics
### Response:
def open(self):
"""initialize visit variables and statistics
"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0) |
def _check_for_fail_message(self, transport, exc_info, timeout): # pylint: disable=no-self-use
"""Check for a 'FAIL' message from transport.
This method always raises, if 'FAIL' was read, it will raise an
AdbRemoteError with the message, otherwise it will raise based on
exc_info, which should be a tuple as per sys.exc_info().
Args:
transport: Transport from which to read for a 'FAIL' message.
exc_info: Exception info to raise if no 'FAIL' is read.
timeout: Timeout to use for the read operation.
Raises:
AdbRemoteError: If a 'FAIL' is read, otherwise raises exc_info.
"""
try:
transport.read_message(timeout)
except usb_exceptions.CommonUsbError:
# If we got a remote error, raise that exception.
if sys.exc_info()[0] is usb_exceptions.AdbRemoteError:
raise
# Otherwise reraise the original exception.
raise_with_traceback(exc_info[0](exc_info[1]), traceback=exc_info[2]) | Check for a 'FAIL' message from transport.
This method always raises: if 'FAIL' was read, it will raise an
AdbRemoteError with the message; otherwise it will raise based on
exc_info, which should be a tuple as per sys.exc_info().
Args:
transport: Transport from which to read for a 'FAIL' message.
exc_info: Exception info to raise if no 'FAIL' is read.
timeout: Timeout to use for the read operation.
Raises:
AdbRemoteError: If a 'FAIL' is read, otherwise raises exc_info. | Below is the instruction that describes the task:
### Input:
Check for a 'FAIL' message from transport.
This method always raises: if 'FAIL' was read, it will raise an
AdbRemoteError with the message; otherwise it will raise based on
exc_info, which should be a tuple as per sys.exc_info().
Args:
transport: Transport from which to read for a 'FAIL' message.
exc_info: Exception info to raise if no 'FAIL' is read.
timeout: Timeout to use for the read operation.
Raises:
AdbRemoteError: If a 'FAIL' is read, otherwise raises exc_info.
### Response:
def _check_for_fail_message(self, transport, exc_info, timeout): # pylint: disable=no-self-use
"""Check for a 'FAIL' message from transport.
This method always raises: if 'FAIL' was read, it will raise an
AdbRemoteError with the message; otherwise it will raise based on
exc_info, which should be a tuple as per sys.exc_info().
Args:
transport: Transport from which to read for a 'FAIL' message.
exc_info: Exception info to raise if no 'FAIL' is read.
timeout: Timeout to use for the read operation.
Raises:
AdbRemoteError: If a 'FAIL' is read, otherwise raises exc_info.
"""
try:
transport.read_message(timeout)
except usb_exceptions.CommonUsbError:
# If we got a remote error, raise that exception.
if sys.exc_info()[0] is usb_exceptions.AdbRemoteError:
raise
# Otherwise reraise the original exception.
raise_with_traceback(exc_info[0](exc_info[1]), traceback=exc_info[2]) |
def config_parse(files=None, config=None, config_profile=".fissconfig", **kwargs):
'''
Read initial configuration state, from named config files; store
this state within a config dictionary (which may be nested) whose keys may
also be referenced as attributes (safely, defaulting to None if unset). A
config object may be passed in, as a way of accumulating or overwriting
configuration state; if one is NOT passed, the default config obj is used
'''
local_config = config
config = __fcconfig
cfgparser = configparser.SafeConfigParser()
filenames = list()
# Give personal/user followed by current working directory configuration the first say
filenames.append(os.path.join(os.path.expanduser('~'), config_profile))
filenames.append(os.path.join(os.getcwd(), config_profile))
if files:
if isinstance(files, string_types):
filenames.append(files)
elif isinstance(files, Iterable):
for f in files:
if isinstance(f, IOBase):
f = f.name
filenames.append(f)
cfgparser.read(filenames)
# [DEFAULT] defines common variables for interpolation/substitution in
# other sections, and are stored at the root level of the config object
for keyval in cfgparser.items('DEFAULT'):
#print("config_parse: adding config variable %s=%s" % (keyval[0], str(keyval[1])))
__fcconfig[keyval[0]] = keyval[1]
for section in cfgparser.sections():
config[section] = attrdict()
for option in cfgparser.options(section):
# DEFAULT vars ALSO behave as though they were defined in every
# section, but we purposely skip them here so that each section
# reflects only the options explicitly defined in that section
if not config[option]:
config[section][option] = cfgparser.get(section, option)
config.verbosity = int(config.verbosity)
if not config.root_url.endswith('/'):
config.root_url += '/'
if os.path.isfile(config.credentials):
os.environ[environment_vars.CREDENTIALS] = config.credentials
# if local_config override options with passed options
if local_config is not None:
for key, value in local_config.items():
config[key] = value
# if any explicit config options are passed, override.
for key, value in kwargs.items():
config[key] = value
return config | Read initial configuration state, from named config files; store
this state within a config dictionary (which may be nested) whose keys may
also be referenced as attributes (safely, defaulting to None if unset). A
config object may be passed in, as a way of accumulating or overwriting
configuration state; if one is NOT passed, the default config obj is used | Below is the instruction that describes the task:
### Input:
Read initial configuration state, from named config files; store
this state within a config dictionary (which may be nested) whose keys may
also be referenced as attributes (safely, defaulting to None if unset). A
config object may be passed in, as a way of accumulating or overwriting
configuration state; if one is NOT passed, the default config obj is used
### Response:
def config_parse(files=None, config=None, config_profile=".fissconfig", **kwargs):
'''
Read initial configuration state, from named config files; store
this state within a config dictionary (which may be nested) whose keys may
also be referenced as attributes (safely, defaulting to None if unset). A
config object may be passed in, as a way of accumulating or overwriting
configuration state; if one is NOT passed, the default config obj is used
'''
local_config = config
config = __fcconfig
cfgparser = configparser.SafeConfigParser()
filenames = list()
# Give personal/user followed by current working directory configuration the first say
filenames.append(os.path.join(os.path.expanduser('~'), config_profile))
filenames.append(os.path.join(os.getcwd(), config_profile))
if files:
if isinstance(files, string_types):
filenames.append(files)
elif isinstance(files, Iterable):
for f in files:
if isinstance(f, IOBase):
f = f.name
filenames.append(f)
cfgparser.read(filenames)
# [DEFAULT] defines common variables for interpolation/substitution in
# other sections, and are stored at the root level of the config object
for keyval in cfgparser.items('DEFAULT'):
#print("config_parse: adding config variable %s=%s" % (keyval[0], str(keyval[1])))
__fcconfig[keyval[0]] = keyval[1]
for section in cfgparser.sections():
config[section] = attrdict()
for option in cfgparser.options(section):
# DEFAULT vars ALSO behave as though they were defined in every
# section, but we purposely skip them here so that each section
# reflects only the options explicitly defined in that section
if not config[option]:
config[section][option] = cfgparser.get(section, option)
config.verbosity = int(config.verbosity)
if not config.root_url.endswith('/'):
config.root_url += '/'
if os.path.isfile(config.credentials):
os.environ[environment_vars.CREDENTIALS] = config.credentials
# if local_config override options with passed options
if local_config is not None:
for key, value in local_config.items():
config[key] = value
# if any explicit config options are passed, override.
for key, value in kwargs.items():
config[key] = value
return config |
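The [DEFAULT] handling leans on standard configparser behaviour: options defined there are visible in every section and can be interpolated into other values. A self-contained illustration with an invented profile (independent of the fissconfig files above):

import configparser

cp = configparser.ConfigParser()
cp.read_string("""
[DEFAULT]
root_url = https://api.example.org/

[api]
workspace_url = %(root_url)sworkspaces
""")
print(cp.get('api', 'workspace_url'))    # https://api.example.org/workspaces
print(sorted(cp.options('api')))         # ['root_url', 'workspace_url'] - DEFAULT options show up in every section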
def get_design_document(self, ddoc_id):
"""
Retrieves a design document. If a design document exists remotely
then that content is wrapped in a DesignDocument object and returned
to the caller. Otherwise a "shell" DesignDocument object is returned.
:param str ddoc_id: Design document id
:returns: A DesignDocument instance, if exists remotely then it will
be populated accordingly
"""
ddoc = DesignDocument(self, ddoc_id)
try:
ddoc.fetch()
except HTTPError as error:
if error.response.status_code != 404:
raise
return ddoc | Retrieves a design document. If a design document exists remotely
then that content is wrapped in a DesignDocument object and returned
to the caller. Otherwise a "shell" DesignDocument object is returned.
:param str ddoc_id: Design document id
:returns: A DesignDocument instance, if exists remotely then it will
be populated accordingly | Below is the instruction that describes the task:
### Input:
Retrieves a design document. If a design document exists remotely
then that content is wrapped in a DesignDocument object and returned
to the caller. Otherwise a "shell" DesignDocument object is returned.
:param str ddoc_id: Design document id
:returns: A DesignDocument instance, if exists remotely then it will
be populated accordingly
### Response:
def get_design_document(self, ddoc_id):
"""
Retrieves a design document. If a design document exists remotely
then that content is wrapped in a DesignDocument object and returned
to the caller. Otherwise a "shell" DesignDocument object is returned.
:param str ddoc_id: Design document id
:returns: A DesignDocument instance, if exists remotely then it will
be populated accordingly
"""
ddoc = DesignDocument(self, ddoc_id)
try:
ddoc.fetch()
except HTTPError as error:
if error.response.status_code != 404:
raise
return ddoc |
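A hedged usage sketch: this method appears to come from python-cloudant's CouchDatabase, so a typical call might look like the following (server URL, credentials and names are placeholders):

from cloudant.client import CouchDB

client = CouchDB('admin', 'pass', url='http://localhost:5984', connect=True)   # placeholder credentials
db = client['mydb']
ddoc = db.get_design_document('_design/views')   # populated if it exists remotely, otherwise a fresh shell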
def profiling(self):
"""A generator which profiles then broadcasts the result. Implement
sleeping loop using this::
def profile_periodically(self):
for __ in self.profiling():
time.sleep(self.interval)
"""
self._log_profiler_started()
while self.clients:
try:
self.profiler.start()
except RuntimeError:
pass
# should sleep.
yield
self.profiler.stop()
result = self.profiler.result()
data = pack_msg(RESULT, result,
pickle_protocol=self.pickle_protocol)
self._latest_result_data = data
# broadcast.
closed_clients = []
for client in self.clients:
try:
self._send(client, data)
except socket.error as exc:
if exc.errno == EPIPE:
closed_clients.append(client)
del data
# handle disconnections.
for client in closed_clients:
self.disconnected(client)
self._log_profiler_stopped() | A generator which profiles then broadcasts the result. Implement
sleeping loop using this::
def profile_periodically(self):
for __ in self.profiling():
time.sleep(self.interval) | Below is the instruction that describes the task:
### Input:
A generator which profiles then broadcasts the result. Implement
sleeping loop using this::
def profile_periodically(self):
for __ in self.profiling():
time.sleep(self.interval)
### Response:
def profiling(self):
"""A generator which profiles then broadcasts the result. Implement
sleeping loop using this::
def profile_periodically(self):
for __ in self.profiling():
time.sleep(self.interval)
"""
self._log_profiler_started()
while self.clients:
try:
self.profiler.start()
except RuntimeError:
pass
# should sleep.
yield
self.profiler.stop()
result = self.profiler.result()
data = pack_msg(RESULT, result,
pickle_protocol=self.pickle_protocol)
self._latest_result_data = data
# broadcast.
closed_clients = []
for client in self.clients:
try:
self._send(client, data)
except socket.error as exc:
if exc.errno == EPIPE:
closed_clients.append(client)
del data
# handle disconnections.
for client in closed_clients:
self.disconnected(client)
self._log_profiler_stopped() |
def get_plan(self, nodes=None):
"""
Retrieve a plan, e.g. a list of fixtures to be loaded sorted on
dependency.
:param list nodes: list of nodes to be loaded.
:return:
"""
if nodes:
plan = self.graph.resolve_nodes(nodes)
else:
plan = self.graph.resolve_node()
return plan | Retrieve a plan, e.g. a list of fixtures to be loaded sorted on
dependency.
:param list nodes: list of nodes to be loaded.
:return: | Below is the instruction that describes the task:
### Input:
Retrieve a plan, e.g. a list of fixtures to be loaded sorted on
dependency.
:param list nodes: list of nodes to be loaded.
:return:
### Response:
def get_plan(self, nodes=None):
"""
Retrieve a plan, e.g. a list of fixtures to be loaded sorted on
dependency.
:param list nodes: list of nodes to be loaded.
:return:
"""
if nodes:
plan = self.graph.resolve_nodes(nodes)
else:
plan = self.graph.resolve_node()
return plan |
def to_ufo_paths(self, ufo_glyph, layer):
"""Draw .glyphs paths onto a pen."""
pen = ufo_glyph.getPointPen()
for path in layer.paths:
# the list is changed below, otherwise you can't draw more than once
# per session.
nodes = list(path.nodes)
for node in nodes:
self.to_ufo_node_user_data(ufo_glyph, node)
pen.beginPath()
if not nodes:
pen.endPath()
continue
if not path.closed:
node = nodes.pop(0)
assert node.type == "line", "Open path starts with off-curve points"
pen.addPoint(tuple(node.position), segmentType="move")
else:
# In Glyphs.app, the starting node of a closed contour is always
# stored at the end of the nodes list.
nodes.insert(0, nodes.pop())
for node in nodes:
node_type = _to_ufo_node_type(node.type)
pen.addPoint(
tuple(node.position), segmentType=node_type, smooth=node.smooth
)
pen.endPath() | Draw .glyphs paths onto a pen. | Below is the instruction that describes the task:
### Input:
Draw .glyphs paths onto a pen.
### Response:
def to_ufo_paths(self, ufo_glyph, layer):
"""Draw .glyphs paths onto a pen."""
pen = ufo_glyph.getPointPen()
for path in layer.paths:
# the list is changed below, otherwise you can't draw more than once
# per session.
nodes = list(path.nodes)
for node in nodes:
self.to_ufo_node_user_data(ufo_glyph, node)
pen.beginPath()
if not nodes:
pen.endPath()
continue
if not path.closed:
node = nodes.pop(0)
assert node.type == "line", "Open path starts with off-curve points"
pen.addPoint(tuple(node.position), segmentType="move")
else:
# In Glyphs.app, the starting node of a closed contour is always
# stored at the end of the nodes list.
nodes.insert(0, nodes.pop())
for node in nodes:
node_type = _to_ufo_node_type(node.type)
pen.addPoint(
tuple(node.position), segmentType=node_type, smooth=node.smooth
)
pen.endPath() |
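The closed-contour branch only rotates the node list so that the start node, which Glyphs stores last, comes first before the points are replayed; the rotation itself is this one-liner:

nodes = ['offcurve1', 'offcurve2', 'curve', 'start']   # Glyphs keeps the starting node at the end
nodes.insert(0, nodes.pop())
print(nodes)   # ['start', 'offcurve1', 'offcurve2', 'curve']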
def get(self,path):
"""
Retrieves a config value.
Args:
path (String): Name of a config entry
Returns:
type: String
The value of the config, or None
"""
path = path.upper()
if path in self._configCache:
return self._configCache[path]
else :
return self._findConfig(path) | Retrieves a config value.
Args:
path (String): Name of a config entry
Returns:
type: String
The value of the config, or None | Below is the instruction that describes the task:
### Input:
Retrieves a config value.
Args:
path (String): Name of a config entry
Returns:
type: String
The value of the config, or None
### Response:
def get(self,path):
"""
Retrieves a config value.
Args:
path (String): Name of a config entry
Returns:
type: String
The value of the config, or None
"""
path = path.upper()
if path in self._configCache:
return self._configCache[path]
else :
return self._findConfig(path) |
def work_cancel(self, hash):
"""
Stop generating **work** for block
.. enable_control required
:param hash: Hash to stop generating work for
:type hash: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.work_cancel(
... hash="718CC2121C3E641059BC1C2CFC45666C99E8AE922F7A807B7D07B62C995D79E2"
... )
True
"""
hash = self._process_value(hash, 'block')
payload = {"hash": hash}
resp = self.call('work_cancel', payload)
return resp == {} | Stop generating **work** for block
.. enable_control required
:param hash: Hash to stop generating work for
:type hash: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.work_cancel(
... hash="718CC2121C3E641059BC1C2CFC45666C99E8AE922F7A807B7D07B62C995D79E2"
... )
True | Below is the instruction that describes the task:
### Input:
Stop generating **work** for block
.. enable_control required
:param hash: Hash to stop generating work for
:type hash: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.work_cancel(
... hash="718CC2121C3E641059BC1C2CFC45666C99E8AE922F7A807B7D07B62C995D79E2"
... )
True
### Response:
def work_cancel(self, hash):
"""
Stop generating **work** for block
.. enable_control required
:param hash: Hash to stop generating work for
:type hash: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.work_cancel(
... hash="718CC2121C3E641059BC1C2CFC45666C99E8AE922F7A807B7D07B62C995D79E2"
... )
True
"""
hash = self._process_value(hash, 'block')
payload = {"hash": hash}
resp = self.call('work_cancel', payload)
return resp == {} |
def new(self):
"""
The new value present in the event.
"""
ori = self.original.action
if isinstance(ori, (
types.ChannelAdminLogEventActionChangeAbout,
types.ChannelAdminLogEventActionChangeTitle,
types.ChannelAdminLogEventActionChangeUsername,
types.ChannelAdminLogEventActionToggleInvites,
types.ChannelAdminLogEventActionTogglePreHistoryHidden,
types.ChannelAdminLogEventActionToggleSignatures
)):
return ori.new_value
elif isinstance(ori, types.ChannelAdminLogEventActionChangePhoto):
return ori.new_photo
elif isinstance(ori, types.ChannelAdminLogEventActionChangeStickerSet):
return ori.new_stickerset
elif isinstance(ori, types.ChannelAdminLogEventActionEditMessage):
return ori.new_message
elif isinstance(ori, (
types.ChannelAdminLogEventActionParticipantToggleAdmin,
types.ChannelAdminLogEventActionParticipantToggleBan
)):
return ori.new_participant
elif isinstance(ori, types.ChannelAdminLogEventActionParticipantInvite):
return ori.participant
elif isinstance(ori, types.ChannelAdminLogEventActionDefaultBannedRights):
return ori.new_banned_rights
elif isinstance(ori, types.ChannelAdminLogEventActionStopPoll):
return ori.message | The new value present in the event. | Below is the instruction that describes the task:
### Input:
The new value present in the event.
### Response:
def new(self):
"""
The new value present in the event.
"""
ori = self.original.action
if isinstance(ori, (
types.ChannelAdminLogEventActionChangeAbout,
types.ChannelAdminLogEventActionChangeTitle,
types.ChannelAdminLogEventActionChangeUsername,
types.ChannelAdminLogEventActionToggleInvites,
types.ChannelAdminLogEventActionTogglePreHistoryHidden,
types.ChannelAdminLogEventActionToggleSignatures
)):
return ori.new_value
elif isinstance(ori, types.ChannelAdminLogEventActionChangePhoto):
return ori.new_photo
elif isinstance(ori, types.ChannelAdminLogEventActionChangeStickerSet):
return ori.new_stickerset
elif isinstance(ori, types.ChannelAdminLogEventActionEditMessage):
return ori.new_message
elif isinstance(ori, (
types.ChannelAdminLogEventActionParticipantToggleAdmin,
types.ChannelAdminLogEventActionParticipantToggleBan
)):
return ori.new_participant
elif isinstance(ori, types.ChannelAdminLogEventActionParticipantInvite):
return ori.participant
elif isinstance(ori, types.ChannelAdminLogEventActionDefaultBannedRights):
return ori.new_banned_rights
elif isinstance(ori, types.ChannelAdminLogEventActionStopPoll):
return ori.message |
def add_namespace_uri(self, ns_uri, prefix=None, schema_location=None):
"""Adds a new namespace to this set, optionally with a prefix and
schema location URI.
If the namespace already exists, the given prefix and schema location
are merged with the existing entry:
* If non-None, ``prefix`` is added to the set. The preferred
prefix is not modified.
* If a schema location is not already associated with the
namespace, it is set to ``schema_location`` (if given).
If the namespace doesn't already exist in this set (so a new one is
being created) and a prefix is given, that prefix becomes preferred.
If not given, a preference as a default namespace is used.
Args:
ns_uri (str): The URI of the new namespace
prefix (str): The desired prefix for the new namespace (optional)
schema_location (str): The desired schema location for the new
namespace (optional).
Raises:
DuplicatePrefixError: If a prefix is given which already maps to a
different namespace
ConflictingSchemaLocationError: If a schema location is given and
the namespace already exists in this set with a different
schema location.
"""
assert ns_uri
if ns_uri in self.__ns_uri_map:
# We have a _NamespaceInfo object for this URI already. So this
# is a merge operation.
#
# We modify a copy of the real _NamespaceInfo so that we are
# exception-safe: if something goes wrong, we don't end up with a
# half-changed NamespaceSet.
ni = self.__lookup_uri(ns_uri)
new_ni = copy.deepcopy(ni)
# Reconcile prefixes
if prefix:
self.__check_prefix_conflict(ni, prefix)
new_ni.prefixes.add(prefix)
self.__merge_schema_locations(new_ni, schema_location)
# At this point, we have a legit new_ni object. Now we update
# the set, ensuring our invariants. This should replace
# all instances of the old ni in this set.
for p in new_ni.prefixes:
self.__prefix_map[p] = new_ni
self.__ns_uri_map[new_ni.uri] = new_ni
else:
# A brand new namespace. The incoming prefix should not exist at
# all in the prefix map.
if prefix:
self.__check_prefix_conflict(ns_uri, prefix)
ni = _NamespaceInfo(ns_uri, prefix, schema_location)
self.__add_namespaceinfo(ni) | Adds a new namespace to this set, optionally with a prefix and
schema location URI.
If the namespace already exists, the given prefix and schema location
are merged with the existing entry:
* If non-None, ``prefix`` is added to the set. The preferred
prefix is not modified.
* If a schema location is not already associated with the
namespace, it is set to ``schema_location`` (if given).
If the namespace doesn't already exist in this set (so a new one is
being created) and a prefix is given, that prefix becomes preferred.
If not given, a preference as a default namespace is used.
Args:
ns_uri (str): The URI of the new namespace
prefix (str): The desired prefix for the new namespace (optional)
schema_location (str): The desired schema location for the new
namespace (optional).
Raises:
DuplicatePrefixError: If a prefix is given which already maps to a
different namespace
ConflictingSchemaLocationError: If a schema location is given and
the namespace already exists in this set with a different
schema location. | Below is the instruction that describes the task:
### Input:
Adds a new namespace to this set, optionally with a prefix and
schema location URI.
If the namespace already exists, the given prefix and schema location
are merged with the existing entry:
* If non-None, ``prefix`` is added to the set. The preferred
prefix is not modified.
* If a schema location is not already associated with the
namespace, it is set to ``schema_location`` (if given).
If the namespace doesn't already exist in this set (so a new one is
being created) and a prefix is given, that prefix becomes preferred.
If not given, a preference as a default namespace is used.
Args:
ns_uri (str): The URI of the new namespace
prefix (str): The desired prefix for the new namespace (optional)
schema_location (str): The desired schema location for the new
namespace (optional).
Raises:
DuplicatePrefixError: If a prefix is given which already maps to a
different namespace
ConflictingSchemaLocationError: If a schema location is given and
the namespace already exists in this set with a different
schema location.
### Response:
def add_namespace_uri(self, ns_uri, prefix=None, schema_location=None):
"""Adds a new namespace to this set, optionally with a prefix and
schema location URI.
If the namespace already exists, the given prefix and schema location
are merged with the existing entry:
* If non-None, ``prefix`` is added to the set. The preferred
prefix is not modified.
* If a schema location is not already associated with the
namespace, it is set to ``schema_location`` (if given).
If the namespace doesn't already exist in this set (so a new one is
being created) and a prefix is given, that prefix becomes preferred.
If not given, a preference as a default namespace is used.
Args:
ns_uri (str): The URI of the new namespace
prefix (str): The desired prefix for the new namespace (optional)
schema_location (str): The desired schema location for the new
namespace (optional).
Raises:
DuplicatePrefixError: If a prefix is given which already maps to a
different namespace
ConflictingSchemaLocationError: If a schema location is given and
the namespace already exists in this set with a different
schema location.
"""
assert ns_uri
if ns_uri in self.__ns_uri_map:
# We have a _NamespaceInfo object for this URI already. So this
# is a merge operation.
#
# We modify a copy of the real _NamespaceInfo so that we are
# exception-safe: if something goes wrong, we don't end up with a
# half-changed NamespaceSet.
ni = self.__lookup_uri(ns_uri)
new_ni = copy.deepcopy(ni)
# Reconcile prefixes
if prefix:
self.__check_prefix_conflict(ni, prefix)
new_ni.prefixes.add(prefix)
self.__merge_schema_locations(new_ni, schema_location)
# At this point, we have a legit new_ni object. Now we update
# the set, ensuring our invariants. This should replace
# all instances of the old ni in this set.
for p in new_ni.prefixes:
self.__prefix_map[p] = new_ni
self.__ns_uri_map[new_ni.uri] = new_ni
else:
# A brand new namespace. The incoming prefix should not exist at
# all in the prefix map.
if prefix:
self.__check_prefix_conflict(ns_uri, prefix)
ni = _NamespaceInfo(ns_uri, prefix, schema_location)
self.__add_namespaceinfo(ni) |
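A hedged usage sketch of the merge behaviour described in the docstring (assumes NamespaceSet can be constructed with no arguments; URIs and prefixes are illustrative):

ns = NamespaceSet()
ns.add_namespace_uri('http://example.com/ns', prefix='ex')
ns.add_namespace_uri('http://example.com/ns', prefix='ex2',
                     schema_location='http://example.com/ns.xsd')
# 'ex' remains the preferred prefix, 'ex2' is added as an alias, and the schema
# location is filled in; reusing 'ex' for a different URI would raise DuplicatePrefixError.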