code stringlengths 75-104k | docstring stringlengths 1-46.9k | text stringlengths 164-112k |
---|---|---|
def ordersku_update(self, oid, sku_id=None, sku_props=None):
'''taobao.trade.ordersku.update Update the sales (SKU) properties of a trade order
Requires seller-level or higher permission to call this API; it can be called repeatedly to update the trade memo, and it also supports adding a memo'''
request = TOPRequest('taobao.trade.ordersku.update')
request['oid'] = oid
if sku_id is not None: request['sku_id'] = sku_id
if sku_props is not None: request['sku_props'] = sku_props
self.create(self.execute(request)['order'])
return self | taobao.trade.ordersku.update Update the sales (SKU) properties of a trade order
Requires seller-level or higher permission to call this API; it can be called repeatedly to update the trade memo, and it also supports adding a memo | Below is the instruction that describes the task:
### Input:
taobao.trade.ordersku.update Update the sales (SKU) properties of a trade order
Requires seller-level or higher permission to call this API; it can be called repeatedly to update the trade memo, and it also supports adding a memo
### Response:
def ordersku_update(self, oid, sku_id=None, sku_props=None):
'''taobao.trade.ordersku.update Update the sales (SKU) properties of a trade order
Requires seller-level or higher permission to call this API; it can be called repeatedly to update the trade memo, and it also supports adding a memo'''
request = TOPRequest('taobao.trade.ordersku.update')
request['oid'] = oid
if sku_id is not None: request['sku_id'] = sku_id
if sku_props is not None: request['sku_props'] = sku_props
self.create(self.execute(request)['order'])
return self |
def http_construct(args, unknown):
"""
Construct the --http <arg> from the args/unknown space -- relevant only for 'purl'.
:param args:
:param unknown:
:return:
"""
str_http = ''
b_httpSpecd = False
if '--http' in unknown:
try:
str_httpArg = unknown[unknown.index('--http')+1]
unknown.remove('--http')
unknown.remove(str_httpArg)
except:
str_httpArg = ""
str_http = '--http %s' % str_httpArg
b_httpSpecd = True
if not b_httpSpecd:
str_serverIP = "172.17.0.2"
str_serverPort = '5010'
try:
if args.b_pman:
str_serverIP = os.environ['PMAN_PORT_5010_TCP_ADDR']
str_serverPort = os.environ['PMAN_PORT_5010_TCP_PORT']
if args.b_pfioh:
str_serverIP = os.environ['PFIOH_PORT_5055_TCP_ADDR']
str_serverPort = os.environ['PFIOH_PORT_5055_TCP_PORT']
except:
pass
str_http = '--http %s:%s/api/v1/cmd/' % (str_serverIP, str_serverPort)
return str_http | Construct the --http <arg> from the args/unknown space -- relevant only for 'purl'.
:param args:
:param unknown:
:return: | Below is the instruction that describes the task:
### Input:
Construct the --http <arg> from the args/unknown space -- relevant only for 'purl'.
:param args:
:param unknown:
:return:
### Response:
def http_construct(args, unknown):
"""
Construct the --http <arg> from the args/unknown space -- relevant only for 'purl'.
:param args:
:param unknown:
:return:
"""
str_http = ''
b_httpSpecd = False
if '--http' in unknown:
try:
str_httpArg = unknown[unknown.index('--http')+1]
unknown.remove('--http')
unknown.remove(str_httpArg)
except:
str_httpArg = ""
str_http = '--http %s' % str_httpArg
b_httpSpecd = True
if not b_httpSpecd:
str_serverIP = "172.17.0.2"
str_serverPort = '5010'
try:
if args.b_pman:
str_serverIP = os.environ['PMAN_PORT_5010_TCP_ADDR']
str_serverPort = os.environ['PMAN_PORT_5010_TCP_PORT']
if args.b_pfioh:
str_serverIP = os.environ['PFIOH_PORT_5055_TCP_ADDR']
str_serverPort = os.environ['PFIOH_PORT_5055_TCP_PORT']
except:
pass
str_http = '--http %s:%s/api/v1/cmd/' % (str_serverIP, str_serverPort)
return str_http |
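The snippet below is a minimal, self-contained sketch of the argument-peeling pattern used in http_construct: it pulls '--http' and its value out of a leftover argument list. The sample unknown list and the address in it are made up for illustration.

# Sketch: extract '--http <arg>' from a leftover argument list, as http_construct does.
unknown = ['--verbose', '--http', '10.0.0.1:5010/api/v1/cmd/', '--quiet']
str_http = ''
if '--http' in unknown:
    str_httpArg = unknown[unknown.index('--http') + 1]
    unknown.remove('--http')
    unknown.remove(str_httpArg)
    str_http = '--http %s' % str_httpArg
print(str_http)   # --http 10.0.0.1:5010/api/v1/cmd/
print(unknown)    # ['--verbose', '--quiet']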
def _get_service(name):
'''
Get information about a service. If the service is not found, raise an
error
:param str name: Service label, file name, or full path
:return: The service information for the service, otherwise an Error
:rtype: dict
'''
services = __utils__['mac_utils.available_services']()
name = name.lower()
service = _name_in_services(name, services)
# if we found the service we can return it
if service:
return service
# if we got here our service is not available, now we can check to see if
# we received a cached batch of services, if not we did a fresh check
# so we need to raise that the service could not be found.
try:
if not __context__['using_cached_services']:
raise CommandExecutionError('Service not found: {0}'.format(name))
except KeyError:
pass
# we used a cached version to check, a service could have been made
# between now and then, we should refresh our available services.
services = __utils__['mac_utils.available_services'](refresh=True)
# check to see if we found the service we are looking for.
service = _name_in_services(name, services)
if not service:
# Could not find the service after refresh raise.
raise CommandExecutionError('Service not found: {0}'.format(name))
# found it :)
return service | Get information about a service. If the service is not found, raise an
error
:param str name: Service label, file name, or full path
:return: The service information for the service, otherwise an Error
:rtype: dict | Below is the instruction that describes the task:
### Input:
Get information about a service. If the service is not found, raise an
error
:param str name: Service label, file name, or full path
:return: The service information for the service, otherwise an Error
:rtype: dict
### Response:
def _get_service(name):
'''
Get information about a service. If the service is not found, raise an
error
:param str name: Service label, file name, or full path
:return: The service information for the service, otherwise an Error
:rtype: dict
'''
services = __utils__['mac_utils.available_services']()
name = name.lower()
service = _name_in_services(name, services)
# if we found the service we can return it
if service:
return service
# if we got here our service is not available, now we can check to see if
# we received a cached batch of services, if not we did a fresh check
# so we need to raise that the service could not be found.
try:
if not __context__['using_cached_services']:
raise CommandExecutionError('Service not found: {0}'.format(name))
except KeyError:
pass
# we used a cached version to check, a service could have been made
# between now and then, we should refresh our available services.
services = __utils__['mac_utils.available_services'](refresh=True)
# check to see if we found the service we are looking for.
service = _name_in_services(name, services)
if not service:
# Could not find the service after refresh raise.
raise CommandExecutionError('Service not found: {0}'.format(name))
# found it :)
return service |
def promote(self, level=None):
"""Convert to the next higher level summary level"""
if level is None:
if len(self.fields) < 2:
if self.level in ('region', 'division', 'state', 'ua'):
cls = self.get_class('us')
else:
return None
else:
cls = self.get_class(self.fields[-2])
else:
cls = self.get_class(level)
d = dict(self.__dict__.items())
d['sl'] = self.sl
return cls(**d) | Convert to the next higher level summary level | Below is the instruction that describes the task:
### Input:
Convert to the next higher level summary level
### Response:
def promote(self, level=None):
"""Convert to the next higher level summary level"""
if level is None:
if len(self.fields) < 2:
if self.level in ('region', 'division', 'state', 'ua'):
cls = self.get_class('us')
else:
return None
else:
cls = self.get_class(self.fields[-2])
else:
cls = self.get_class(level)
d = dict(self.__dict__.items())
d['sl'] = self.sl
return cls(**d) |
def most_visited_pages_charts():
"""Chart for most visited pages."""
stats = most_visited_pages_stats()
charts = []
for i, stat in enumerate(stats['more_than_10']):
bound = stat['bound']
subset = stat['subset']
chart_options = {
'chart': {
'type': 'bar',
'height': 15 * len(subset) + 100
},
'title': {
'text': {0: _('More than %d times') % bound}.get(
i, _('Between %d and %d times') % (
bound, stats['more_than_10'][i - 1]['bound']))
},
'xAxis': {
'categories': [u for (u, c, t) in subset],
'title': {
'text': None
}
},
'yAxis': {
'title': {
'text': None
}
},
'plotOptions': {
'bar': {
'dataLabels': {
'enabled': True
}
},
},
'tooltip': {
'enabled': False
},
'legend': {
'enabled': False
},
'credits': {
'enabled': False
},
}
series_data = []
for index, (url, count, url_type) in enumerate(subset):
data = {
'x': index,
'y': count
}
color = URL_TYPE_COLOR[url_type]
data['color'] = color
series_data.append(data)
chart_options['series'] = [{
'name': _('Requests'),
'data': series_data
}]
charts.append(chart_options)
point_formatter_code = """
return '<br>%s: <strong>' + this.dis + '</strong>(' +
Highcharts.numberFormat(this.dis / this.total_dis * 100, 1) + '%%)' +
'<br>%s: <strong>' + this.occ + '</strong> (' +
Highcharts.numberFormat(this.occ / this.total_occ * 100, 1) + '%%)';
""" % (_('Distinct URLs'), _('Occurrences'))
occurrences = stats['less_than_10']
total_distinct = sum([v['distinct'] for k, v in occurrences.items()])
total_occurrences = sum([v['total'] for k, v in occurrences.items()])
charts.append({
'chart': {
'plotBackgroundColor': None,
'plotBorderWidth': None,
'plotShadow': False,
'type': 'pie'
},
'title': {
'text': _('Less than 10 (type repartition)')
},
'plotOptions': {
'pie': {
'allowPointSelect': True,
'cursor': 'pointer',
'dataLabels': {
'enabled': False
},
'showInLegend': True,
'tooltip': {
'pointFormatter': point_formatter_code
},
}
},
'series': [{
'name': '',
'colorByPoint': True,
'data': [{
'name': _('Valid project URL'),
'dis': occurrences[PROJECT]['distinct'],
'y': occurrences[PROJECT]['total'],
'occ': occurrences[PROJECT]['total'],
'total_dis': total_distinct,
'total_occ': total_occurrences,
'color': URL_TYPE_COLOR[PROJECT]
}, {
'name': _('Old project URL'),
'dis': occurrences[OLD_PROJECT]['distinct'],
'y': occurrences[OLD_PROJECT]['total'],
'occ': occurrences[OLD_PROJECT]['total'],
'total_dis': total_distinct,
'total_occ': total_occurrences,
'color': URL_TYPE_COLOR[OLD_PROJECT]
}, {
'name': _('Valid asset URL'),
'dis': occurrences[ASSET]['distinct'],
'y': occurrences[ASSET]['total'],
'occ': occurrences[ASSET]['total'],
'total_dis': total_distinct,
'total_occ': total_occurrences,
'color': URL_TYPE_COLOR[ASSET]
}, {
'name': _('Old asset URL'),
'dis': occurrences[OLD_ASSET]['distinct'],
'y': occurrences[OLD_ASSET]['total'],
'occ': occurrences[OLD_ASSET]['total'],
'total_dis': total_distinct,
'total_occ': total_occurrences,
'color': URL_TYPE_COLOR[OLD_ASSET]
}, {
'name': _('Common asset URL'),
'dis': occurrences[COMMON_ASSET]['distinct'],
'y': occurrences[COMMON_ASSET]['total'],
'occ': occurrences[COMMON_ASSET]['total'],
'total_dis': total_distinct,
'total_occ': total_occurrences,
'color': URL_TYPE_COLOR[COMMON_ASSET]
}, {
'name': _('False-negative project URL'),
'dis': occurrences[FALSE_NEGATIVE]['distinct'],
'y': occurrences[FALSE_NEGATIVE]['total'],
'occ': occurrences[FALSE_NEGATIVE]['total'],
'total_dis': total_distinct,
'total_occ': total_occurrences,
'color': URL_TYPE_COLOR[FALSE_NEGATIVE]
}, {
'name': _('Suspicious URL (potential attack)'),
'dis': occurrences[SUSPICIOUS]['distinct'],
'y': occurrences[SUSPICIOUS]['total'],
'occ': occurrences[SUSPICIOUS]['total'],
'total_dis': total_distinct,
'total_occ': total_occurrences,
'color': URL_TYPE_COLOR[SUSPICIOUS]
}]
}]
})
return charts | Chart for most visited pages. | Below is the instruction that describes the task:
### Input:
Chart for most visited pages.
### Response:
def most_visited_pages_charts():
"""Chart for most visited pages."""
stats = most_visited_pages_stats()
charts = []
for i, stat in enumerate(stats['more_than_10']):
bound = stat['bound']
subset = stat['subset']
chart_options = {
'chart': {
'type': 'bar',
'height': 15 * len(subset) + 100
},
'title': {
'text': {0: _('More than %d times') % bound}.get(
i, _('Between %d and %d times') % (
bound, stats['more_than_10'][i - 1]['bound']))
},
'xAxis': {
'categories': [u for (u, c, t) in subset],
'title': {
'text': None
}
},
'yAxis': {
'title': {
'text': None
}
},
'plotOptions': {
'bar': {
'dataLabels': {
'enabled': True
}
},
},
'tooltip': {
'enabled': False
},
'legend': {
'enabled': False
},
'credits': {
'enabled': False
},
}
series_data = []
for index, (url, count, url_type) in enumerate(subset):
data = {
'x': index,
'y': count
}
color = URL_TYPE_COLOR[url_type]
data['color'] = color
series_data.append(data)
chart_options['series'] = [{
'name': _('Requests'),
'data': series_data
}]
charts.append(chart_options)
point_formatter_code = """
return '<br>%s: <strong>' + this.dis + '</strong>(' +
Highcharts.numberFormat(this.dis / this.total_dis * 100, 1) + '%%)' +
'<br>%s: <strong>' + this.occ + '</strong> (' +
Highcharts.numberFormat(this.occ / this.total_occ * 100, 1) + '%%)';
""" % (_('Distinct URLs'), _('Occurrences'))
occurrences = stats['less_than_10']
total_distinct = sum([v['distinct'] for k, v in occurrences.items()])
total_occurrences = sum([v['total'] for k, v in occurrences.items()])
charts.append({
'chart': {
'plotBackgroundColor': None,
'plotBorderWidth': None,
'plotShadow': False,
'type': 'pie'
},
'title': {
'text': _('Less than 10 (type repartition)')
},
'plotOptions': {
'pie': {
'allowPointSelect': True,
'cursor': 'pointer',
'dataLabels': {
'enabled': False
},
'showInLegend': True,
'tooltip': {
'pointFormatter': point_formatter_code
},
}
},
'series': [{
'name': '',
'colorByPoint': True,
'data': [{
'name': _('Valid project URL'),
'dis': occurrences[PROJECT]['distinct'],
'y': occurrences[PROJECT]['total'],
'occ': occurrences[PROJECT]['total'],
'total_dis': total_distinct,
'total_occ': total_occurrences,
'color': URL_TYPE_COLOR[PROJECT]
}, {
'name': _('Old project URL'),
'dis': occurrences[OLD_PROJECT]['distinct'],
'y': occurrences[OLD_PROJECT]['total'],
'occ': occurrences[OLD_PROJECT]['total'],
'total_dis': total_distinct,
'total_occ': total_occurrences,
'color': URL_TYPE_COLOR[OLD_PROJECT]
}, {
'name': _('Valid asset URL'),
'dis': occurrences[ASSET]['distinct'],
'y': occurrences[ASSET]['total'],
'occ': occurrences[ASSET]['total'],
'total_dis': total_distinct,
'total_occ': total_occurrences,
'color': URL_TYPE_COLOR[ASSET]
}, {
'name': _('Old asset URL'),
'dis': occurrences[OLD_ASSET]['distinct'],
'y': occurrences[OLD_ASSET]['total'],
'occ': occurrences[OLD_ASSET]['total'],
'total_dis': total_distinct,
'total_occ': total_occurrences,
'color': URL_TYPE_COLOR[OLD_ASSET]
}, {
'name': _('Common asset URL'),
'dis': occurrences[COMMON_ASSET]['distinct'],
'y': occurrences[COMMON_ASSET]['total'],
'occ': occurrences[COMMON_ASSET]['total'],
'total_dis': total_distinct,
'total_occ': total_occurrences,
'color': URL_TYPE_COLOR[COMMON_ASSET]
}, {
'name': _('False-negative project URL'),
'dis': occurrences[FALSE_NEGATIVE]['distinct'],
'y': occurrences[FALSE_NEGATIVE]['total'],
'occ': occurrences[FALSE_NEGATIVE]['total'],
'total_dis': total_distinct,
'total_occ': total_occurrences,
'color': URL_TYPE_COLOR[FALSE_NEGATIVE]
}, {
'name': _('Suspicious URL (potential attack)'),
'dis': occurrences[SUSPICIOUS]['distinct'],
'y': occurrences[SUSPICIOUS]['total'],
'occ': occurrences[SUSPICIOUS]['total'],
'total_dis': total_distinct,
'total_occ': total_occurrences,
'color': URL_TYPE_COLOR[SUSPICIOUS]
}]
}]
})
return charts |
def pre_process_data(filepath):
"""
This is dependent on your training data source but we will try to generalize it as best as possible.
"""
positive_path = os.path.join(filepath, 'pos')
negative_path = os.path.join(filepath, 'neg')
pos_label = 1
neg_label = 0
dataset = []
for filename in glob.glob(os.path.join(positive_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((pos_label, f.read()))
for filename in glob.glob(os.path.join(negative_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((neg_label, f.read()))
shuffle(dataset)
return dataset | This is dependent on your training data source but we will try to generalize it as best as possible. | Below is the instruction that describes the task:
### Input:
This is dependent on your training data source but we will try to generalize it as best as possible.
### Response:
def pre_process_data(filepath):
"""
This is dependent on your training data source but we will try to generalize it as best as possible.
"""
positive_path = os.path.join(filepath, 'pos')
negative_path = os.path.join(filepath, 'neg')
pos_label = 1
neg_label = 0
dataset = []
for filename in glob.glob(os.path.join(positive_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((pos_label, f.read()))
for filename in glob.glob(os.path.join(negative_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((neg_label, f.read()))
shuffle(dataset)
return dataset |
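pre_process_data assumes an IMDB-style layout with pos/ and neg/ subdirectories of text files. The sketch below builds a throwaway layout in a temporary directory and applies the same glob-and-label pattern, so it runs on its own; the file contents are invented.

import glob, os, tempfile
from random import shuffle

# Build a tiny pos/neg layout so the glob-and-label pattern can be exercised end to end.
root = tempfile.mkdtemp()
for sub, text in (('pos', 'great movie'), ('neg', 'terrible movie')):
    os.makedirs(os.path.join(root, sub))
    with open(os.path.join(root, sub, 'example.txt'), 'w') as f:
        f.write(text)

dataset = []
for label, sub in ((1, 'pos'), (0, 'neg')):
    for filename in glob.glob(os.path.join(root, sub, '*.txt')):
        with open(filename, 'r') as f:
            dataset.append((label, f.read()))
shuffle(dataset)
print(dataset)  # [(1, 'great movie'), (0, 'terrible movie')] in shuffled order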
def get_source(fileobj):
"""Translate fileobj into file contents.
fileobj is either a string or a dict. If it's a string, that's the
file contents. If it's a dict, then the filename key contains
the name of the file whose contents we are to use.
If the dict contains a true value for the key delete_after_use,
the file should be deleted once read.
"""
if not isinstance(fileobj, dict):
return fileobj
else:
try:
with io.open(fileobj["filename"], encoding="utf-8",
errors="ignore") as f:
return f.read()
finally:
if fileobj.get('delete_after_use'):
try:
os.remove(fileobj["filename"])
except: # pragma: no cover
pass | Translate fileobj into file contents.
fileobj is either a string or a dict. If it's a string, that's the
file contents. If it's a dict, then the filename key contains
the name of the file whose contents we are to use.
If the dict contains a true value for the key delete_after_use,
the file should be deleted once read. | Below is the instruction that describes the task:
### Input:
Translate fileobj into file contents.
fileobj is either a string or a dict. If it's a string, that's the
file contents. If it's a dict, then the filename key contains
the name of the file whose contents we are to use.
If the dict contains a true value for the key delete_after_use,
the file should be deleted once read.
### Response:
def get_source(fileobj):
"""Translate fileobj into file contents.
fileobj is either a string or a dict. If it's a string, that's the
file contents. If it's a dict, then the filename key contains
the name of the file whose contents we are to use.
If the dict contains a true value for the key delete_after_use,
the file should be deleted once read.
"""
if not isinstance(fileobj, dict):
return fileobj
else:
try:
with io.open(fileobj["filename"], encoding="utf-8",
errors="ignore") as f:
return f.read()
finally:
if fileobj.get('delete_after_use'):
try:
os.remove(fileobj["filename"])
except: # pragma: no cover
pass |
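Assuming get_source above is importable (together with the io and os imports its module needs), a short usage sketch of both input forms, including delete_after_use:

import os, tempfile

# Plain string: returned unchanged.
assert get_source("print('hello')") == "print('hello')"

# Dict form: contents are read from the named file, which is then removed.
fd, path = tempfile.mkstemp(suffix='.py')
with os.fdopen(fd, 'w') as f:
    f.write("x = 1\n")
src = get_source({"filename": path, "delete_after_use": True})
assert src == "x = 1\n"
assert not os.path.exists(path)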
def classify_users(X_test, model, classifier_type, meta_model, upper_cutoff):
"""
Uses a trained model and the unlabelled features to associate users with labels.
The decision is done as per scikit-learn:
http://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html
http://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC.predict
Inputs: - feature_matrix: The graph based-features in either NumPy or SciPy sparse array format.
- model: A trained scikit-learn One-vs-All multi-label scheme of linear SVC models.
Output: - decision_weights: A NumPy array containing the distance of each user from each label discriminator.
"""
if classifier_type == "LinearSVC":
prediction = model.decision_function(X_test)
# prediction = penalize_large_classes(prediction)
meta_prediction = meta_model.predict(X_test)
meta_prediction = np.rint(meta_prediction) + 1
meta_prediction[meta_prediction > upper_cutoff] = upper_cutoff
meta_prediction[meta_prediction < 2] = 2
prediction_indices = np.argsort(prediction, axis=1)
prediction_row = np.empty(int(np.sum(meta_prediction)), dtype=np.int32)
prediction_col = np.empty(int(np.sum(meta_prediction)), dtype=np.int32)
prediction_data = np.empty(int(np.sum(meta_prediction)), dtype=np.float64)
nnz_counter = 0
for i in range(X_test.shape[0]):
jj = prediction_indices[i, -int(meta_prediction[i]):]
dd = prediction[i, jj]
prediction_row[nnz_counter:nnz_counter+int(meta_prediction[i])] = i
prediction_col[nnz_counter:nnz_counter+int(meta_prediction[i])] = jj
prediction_data[nnz_counter:nnz_counter+int(meta_prediction[i])] = dd
nnz_counter += int(meta_prediction[i])
prediction = spsp.coo_matrix((prediction_data,
(prediction_row,
prediction_col)),
shape=prediction.shape)
prediction = normalize(prediction, norm="l2", axis=0)
elif classifier_type == "LogisticRegression":
prediction = model.predict_proba(X_test)
# prediction = penalize_large_classes(prediction)
meta_prediction = meta_model.predict(X_test)
meta_prediction = np.rint(meta_prediction) + 1
meta_prediction[meta_prediction > upper_cutoff] = upper_cutoff
meta_prediction[meta_prediction < 2] = 2
prediction_indices = np.argsort(prediction, axis=1)
prediction_row = np.empty(int(np.sum(meta_prediction)), dtype=np.int32)
prediction_col = np.empty(int(np.sum(meta_prediction)), dtype=np.int32)
prediction_data = np.empty(int(np.sum(meta_prediction)), dtype=np.float64)
nnz_counter = 0
for i in range(X_test.shape[0]):
jj = prediction_indices[i, -int(meta_prediction[i]):]
dd = prediction[i, jj]
prediction_row[nnz_counter:nnz_counter+int(meta_prediction[i])] = i
prediction_col[nnz_counter:nnz_counter+int(meta_prediction[i])] = jj
prediction_data[nnz_counter:nnz_counter+int(meta_prediction[i])] = dd
nnz_counter += int(meta_prediction[i])
prediction = spsp.coo_matrix((prediction_data,
(prediction_row,
prediction_col)),
shape=prediction.shape)
elif classifier_type == "RandomForest":
if issparse(X_test):
prediction = model.predict_proba(X_test.tocsr())
else:
prediction = model.predict_proba(X_test)
# prediction = penalize_large_classes(prediction)
meta_prediction = meta_model.predict(X_test)
meta_prediction = np.rint(meta_prediction) + 1
meta_prediction[meta_prediction > upper_cutoff] = upper_cutoff
meta_prediction[meta_prediction < 2] = 2
prediction_indices = np.argsort(prediction, axis=1)
prediction_row = np.empty(int(np.sum(meta_prediction)), dtype=np.int32)
prediction_col = np.empty(int(np.sum(meta_prediction)), dtype=np.int32)
prediction_data = np.empty(int(np.sum(meta_prediction)), dtype=np.float64)
nnz_counter = 0
for i in range(X_test.shape[0]):
jj = prediction_indices[i, -int(meta_prediction[i]):]
dd = prediction[i, jj]
prediction_row[nnz_counter:nnz_counter+int(meta_prediction[i])] = i
prediction_col[nnz_counter:nnz_counter+int(meta_prediction[i])] = jj
prediction_data[nnz_counter:nnz_counter+int(meta_prediction[i])] = dd
nnz_counter += int(meta_prediction[i])
prediction = spsp.coo_matrix((prediction_data,
(prediction_row,
prediction_col)),
shape=prediction.shape)
else:
print("Invalid classifier type.")
raise RuntimeError
return prediction | Uses a trained model and the unlabelled features to associate users with labels.
The decision is done as per scikit-learn:
http://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html
http://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC.predict
Inputs: - feature_matrix: The graph based-features in either NumPy or SciPy sparse array format.
- model: A trained scikit-learn One-vs-All multi-label scheme of linear SVC models.
Output: - decision_weights: A NumPy array containing the distance of each user from each label discriminator. | Below is the instruction that describes the task:
### Input:
Uses a trained model and the unlabelled features to associate users with labels.
The decision is done as per scikit-learn:
http://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html
http://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC.predict
Inputs: - feature_matrix: The graph based-features in either NumPy or SciPy sparse array format.
- model: A trained scikit-learn One-vs-All multi-label scheme of linear SVC models.
Output: - decision_weights: A NumPy array containing the distance of each user from each label discriminator.
### Response:
def classify_users(X_test, model, classifier_type, meta_model, upper_cutoff):
"""
Uses a trained model and the unlabelled features to associate users with labels.
The decision is done as per scikit-learn:
http://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html
http://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC.predict
Inputs: - feature_matrix: The graph based-features in either NumPy or SciPy sparse array format.
- model: A trained scikit-learn One-vs-All multi-label scheme of linear SVC models.
Output: - decision_weights: A NumPy array containing the distance of each user from each label discriminator.
"""
if classifier_type == "LinearSVC":
prediction = model.decision_function(X_test)
# prediction = penalize_large_classes(prediction)
meta_prediction = meta_model.predict(X_test)
meta_prediction = np.rint(meta_prediction) + 1
meta_prediction[meta_prediction > upper_cutoff] = upper_cutoff
meta_prediction[meta_prediction < 2] = 2
prediction_indices = np.argsort(prediction, axis=1)
prediction_row = np.empty(int(np.sum(meta_prediction)), dtype=np.int32)
prediction_col = np.empty(int(np.sum(meta_prediction)), dtype=np.int32)
prediction_data = np.empty(int(np.sum(meta_prediction)), dtype=np.float64)
nnz_counter = 0
for i in range(X_test.shape[0]):
jj = prediction_indices[i, -int(meta_prediction[i]):]
dd = prediction[i, jj]
prediction_row[nnz_counter:nnz_counter+int(meta_prediction[i])] = i
prediction_col[nnz_counter:nnz_counter+int(meta_prediction[i])] = jj
prediction_data[nnz_counter:nnz_counter+int(meta_prediction[i])] = dd
nnz_counter += int(meta_prediction[i])
prediction = spsp.coo_matrix((prediction_data,
(prediction_row,
prediction_col)),
shape=prediction.shape)
prediction = normalize(prediction, norm="l2", axis=0)
elif classifier_type == "LogisticRegression":
prediction = model.predict_proba(X_test)
# prediction = penalize_large_classes(prediction)
meta_prediction = meta_model.predict(X_test)
meta_prediction = np.rint(meta_prediction) + 1
meta_prediction[meta_prediction > upper_cutoff] = upper_cutoff
meta_prediction[meta_prediction < 2] = 2
prediction_indices = np.argsort(prediction, axis=1)
prediction_row = np.empty(int(np.sum(meta_prediction)), dtype=np.int32)
prediction_col = np.empty(int(np.sum(meta_prediction)), dtype=np.int32)
prediction_data = np.empty(int(np.sum(meta_prediction)), dtype=np.float64)
nnz_counter = 0
for i in range(X_test.shape[0]):
jj = prediction_indices[i, -int(meta_prediction[i]):]
dd = prediction[i, jj]
prediction_row[nnz_counter:nnz_counter+int(meta_prediction[i])] = i
prediction_col[nnz_counter:nnz_counter+int(meta_prediction[i])] = jj
prediction_data[nnz_counter:nnz_counter+int(meta_prediction[i])] = dd
nnz_counter += int(meta_prediction[i])
prediction = spsp.coo_matrix((prediction_data,
(prediction_row,
prediction_col)),
shape=prediction.shape)
elif classifier_type == "RandomForest":
if issparse(X_test):
prediction = model.predict_proba(X_test.tocsr())
else:
prediction = model.predict_proba(X_test)
# prediction = penalize_large_classes(prediction)
meta_prediction = meta_model.predict(X_test)
meta_prediction = np.rint(meta_prediction) + 1
meta_prediction[meta_prediction > upper_cutoff] = upper_cutoff
meta_prediction[meta_prediction < 2] = 2
prediction_indices = np.argsort(prediction, axis=1)
prediction_row = np.empty(int(np.sum(meta_prediction)), dtype=np.int32)
prediction_col = np.empty(int(np.sum(meta_prediction)), dtype=np.int32)
prediction_data = np.empty(int(np.sum(meta_prediction)), dtype=np.float64)
nnz_counter = 0
for i in range(X_test.shape[0]):
jj = prediction_indices[i, -int(meta_prediction[i]):]
dd = prediction[i, jj]
prediction_row[nnz_counter:nnz_counter+int(meta_prediction[i])] = i
prediction_col[nnz_counter:nnz_counter+int(meta_prediction[i])] = jj
prediction_data[nnz_counter:nnz_counter+int(meta_prediction[i])] = dd
nnz_counter += int(meta_prediction[i])
prediction = spsp.coo_matrix((prediction_data,
(prediction_row,
prediction_col)),
shape=prediction.shape)
else:
print("Invalid classifier type.")
raise RuntimeError
return prediction |
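All three branches of classify_users repeat the same step: keep only the top-k scores per row and store them as a sparse matrix. The standalone sketch below isolates that step with NumPy and SciPy only; the score matrix and per-row k values (standing in for the rounded meta_prediction) are invented.

import numpy as np
import scipy.sparse as spsp

prediction = np.array([[0.1, 0.7, 0.2],
                       [0.5, 0.3, 0.9]])
k_per_row = np.array([2, 1])          # stand-in for the rounded meta_prediction

rows, cols, data = [], [], []
order = np.argsort(prediction, axis=1)
for i in range(prediction.shape[0]):
    jj = order[i, -int(k_per_row[i]):]   # column indices of the k largest scores
    rows.extend([i] * int(k_per_row[i]))
    cols.extend(jj.tolist())
    data.extend(prediction[i, jj].tolist())

sparse_pred = spsp.coo_matrix((data, (rows, cols)), shape=prediction.shape)
print(sparse_pred.toarray())
# [[0.  0.7 0.2]
#  [0.  0.  0.9]]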
def deptree(self, field, oids, date=None, level=None, table=None):
'''
Dependency tree builder. Recursively fetches objects that
are children of the initial set of parent object ids provided.
:param field: Field that contains the 'parent of' data
:param oids: Object oids to build dependency tree for
:param date: date (metrique date range) that should be queried.
If date==None then the most recent versions of the
objects will be queried.
:param level: limit depth of recursion
'''
table = self.get_table(table)
fringe = str2list(oids)
checked = set(fringe)
loop_k = 0
while len(fringe) > 0:
if level and loop_k == abs(level):
break
query = '_oid in %s' % list(fringe)
docs = self.find(table=table, query=query, fields=[field],
date=date, raw=True)
fringe = {oid for doc in docs for oid in (doc[field] or [])
if oid not in checked}
checked |= fringe
loop_k += 1
return sorted(checked) | Dependency tree builder. Recursively fetches objects that
are children of the initial set of parent object ids provided.
:param field: Field that contains the 'parent of' data
:param oids: Object oids to build dependency tree for
:param date: date (metrique date range) that should be queried.
If date==None then the most recent versions of the
objects will be queried.
:param level: limit depth of recursion | Below is the instruction that describes the task:
### Input:
Dependency tree builder. Recursively fetches objects that
are children of the initial set of parent object ids provided.
:param field: Field that contains the 'parent of' data
:param oids: Object oids to build dependency tree for
:param date: date (metrique date range) that should be queried.
If date==None then the most recent versions of the
objects will be queried.
:param level: limit depth of recursion
### Response:
def deptree(self, field, oids, date=None, level=None, table=None):
'''
Dependency tree builder. Recursively fetches objects that
are children of the initial set of parent object ids provided.
:param field: Field that contains the 'parent of' data
:param oids: Object oids to build dependency tree for
:param date: date (metrique date range) that should be queried.
If date==None then the most recent versions of the
objects will be queried.
:param level: limit depth of recursion
'''
table = self.get_table(table)
fringe = str2list(oids)
checked = set(fringe)
loop_k = 0
while len(fringe) > 0:
if level and loop_k == abs(level):
break
query = '_oid in %s' % list(fringe)
docs = self.find(table=table, query=query, fields=[field],
date=date, raw=True)
fringe = {oid for doc in docs for oid in (doc[field] or [])
if oid not in checked}
checked |= fringe
loop_k += 1
return sorted(checked) |
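A self-contained sketch of the same fringe/checked breadth-first traversal, with an in-memory dict standing in for the backend find() call; the oid graph is invented for illustration.

# parent oid -> list of child oids (stand-in for the 'parent of' field in storage)
children = {1: [2, 3], 2: [4], 3: [], 4: []}

def deptree_sketch(oids, level=None):
    fringe = list(oids)
    checked = set(fringe)
    loop_k = 0
    while fringe:
        if level and loop_k == abs(level):
            break
        fringe = {oid for parent in fringe for oid in children.get(parent, [])
                  if oid not in checked}
        checked |= fringe
        loop_k += 1
    return sorted(checked)

print(deptree_sketch([1]))           # [1, 2, 3, 4]
print(deptree_sketch([1], level=1))  # [1, 2, 3]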
def create_pywbem_ssl_context():
""" Create an SSL context based on what is commonly accepted as the
required limitations. This code attempts to create the same context for
Python 2 and Python 3 except for the ciphers
This list is based on what is currently defined in the Python SSL
module create_default_context function
This includes:
* Disallow SSLV2 and SSLV3
* Allow TLSV1 TLSV1.1, TLSV1.2
* No compression
* Single DH Use and Single ECDH use
cacerts info is set independently so is not part of our context setter.
"""
if six.PY2:
context = SSL.Context('sslv23')
# Many of the flags are not in the M2Crypto source so they were taken
# from OpenSSL SSL.h module as flags.
context.set_options(SSL.SSL_OP_NO_SSLv2 |
0x02000000 | # OP_NO_SSLV3
0x00020000 | # OP_NO_COMPRESSION
0x00100000 | # OP_SINGLE_DH_USE
0x00400000 | # OP_CIPHER_SERVER_PREFERENCE
0x00080000) # OP_SINGLE_ECDH_USE
else:
# The choice for the Python SSL module is whether to use the
# create_default directly and possibly have different limits depending
# on which version of Python you use or to set the attributes
# directly based on a currently used SSL
context = SSL.create_default_context(purpose=SSL.Purpose.CLIENT_AUTH)
# Variable settings per SSL create_default_context. These are what
# the function above sets for Python 3.4
# context = SSLContext(PROTOCOL_SSLv23)
# context.options |= OP_NO_SSLv2
# context.options |= OP_NO_SSLv3
# context.options |= getattr(SSL, "OP_NO_COMPRESSION", 0)
# context.options |= getattr(SSL, "OP_CIPHER_SERVER_PREFERENCE", 0)
# context.options |= getattr(SSL, "OP_SINGLE_DH_USE", 0)
# context.options |= getattr(SSL, "OP_SINGLE_ECDH_USE", 0)
# context.set_ciphers(_RESTRICTED_SERVER_CIPHERS)
return context | Create an SSL context based on what is commonly accepted as the
required limitations. This code attempts to create the same context for
Python 2 and Python 3 except for the ciphers
This list is based on what is currently defined in the Python SSL
module create_default_context function
This includes:
* Disallow SSLV2 and SSLV3
* Allow TLSV1 TLSV1.1, TLSV1.2
* No compression
* Single DH Use and Single ECDH use
cacerts info is set independently so is not part of our context setter. | Below is the instruction that describes the task:
### Input:
Create an SSL context based on what is commonly accepted as the
required limitations. This code attempts to create the same context for
Python 2 and Python 3 except for the ciphers
This list is based on what is currently defined in the Python SSL
module create_default_context function
This includes:
* Disallow SSLV2 and SSLV3
* Allow TLSV1 TLSV1.1, TLSV1.2
* No compression
* Single DH Use and Single ECDH use
cacerts info is set independently so is not part of our context setter.
### Response:
def create_pywbem_ssl_context():
""" Create an SSL context based on what is commonly accepted as the
required limitations. This code attempts to create the same context for
Python 2 and Python 3 except for the ciphers
This list is based on what is currently defined in the Python SSL
module create_default_context function
This includes:
* Disallow SSLV2 and SSLV3
* Allow TLSV1 TLSV1.1, TLSV1.2
* No compression
* Single DH Use and Single ECDH use
cacerts info is set independently so is not part of our context setter.
"""
if six.PY2:
context = SSL.Context('sslv23')
# Many of the flags are not in the M2Crypto source so they were taken
# from OpenSSL SSL.h module as flags.
context.set_options(SSL.SSL_OP_NO_SSLv2 |
0x02000000 | # OP_NO_SSLV3
0x00020000 | # OP_NO_COMPRESSION
0x00100000 | # OP_SINGLE_DH_USE
0x00400000 | # OP_CIPHER_SERVER_PREFERENCE
0x00080000) # OP_SINGLE_ECDH_USE
else:
# The choice for the Python SSL module is whether to use the
# create_default directly and possibly have different limits depending
# on which version of Python you use or to set the attributes
# directly based on a currently used SSL
context = SSL.create_default_context(purpose=SSL.Purpose.CLIENT_AUTH)
# Variable settings per SSL create_default_context. These are what
# the function above sets for Python 3.4
# context = SSLContext(PROTOCOL_SSLv23)
# context.options |= OP_NO_SSLv2
# context.options |= OP_NO_SSLv3
# context.options |= getattr(SSL, "OP_NO_COMPRESSION", 0)
# context.options |= getattr(SSL, "OP_CIPHER_SERVER_PREFERENCE", 0)
# context.options |= getattr(SSL, "OP_SINGLE_DH_USE", 0)
# context.options |= getattr(SSL, "OP_SINGLE_ECDH_USE", 0)
# context.set_ciphers(_RESTRICTED_SERVER_CIPHERS)
return context |
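For the Python 3 branch, the commented-out settings correspond to the standard-library ssl module. The sketch below applies them explicitly with plain ssl; exact constant availability varies a little between Python versions, hence the getattr guards.

import ssl

# Standard-library equivalent of the commented settings above (Python 3).
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
context.options |= ssl.OP_NO_SSLv3
context.options |= getattr(ssl, "OP_NO_COMPRESSION", 0)
context.options |= getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
context.options |= getattr(ssl, "OP_SINGLE_DH_USE", 0)
context.options |= getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
print(bin(context.options))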
def rps_at(self, t):
'''Return rps for second t'''
if 0 <= t <= self.duration:
return self.minrps + \
float(self.maxrps - self.minrps) * t / self.duration
else:
return 0 | Return rps for second t | Below is the instruction that describes the task:
### Input:
Return rps for second t
### Response:
def rps_at(self, t):
'''Return rps for second t'''
if 0 <= t <= self.duration:
return self.minrps + \
float(self.maxrps - self.minrps) * t / self.duration
else:
return 0 |
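The ramp is plain linear interpolation: rps(t) = minrps + (maxrps - minrps) * t / duration for 0 <= t <= duration, and 0 outside that window. A standalone check with invented values:

# Linear RPS ramp: minrps=10, maxrps=100, duration=60 seconds (illustrative values).
minrps, maxrps, duration = 10, 100, 60

def rps_at(t):
    if 0 <= t <= duration:
        return minrps + float(maxrps - minrps) * t / duration
    return 0

print([rps_at(t) for t in (0, 30, 60, 61)])  # [10.0, 55.0, 100.0, 0]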
def construct(self, **bindings):
"""Constructs the graph and returns either a tensor or a sequence.
Args:
**bindings: Arguments for every deferred parameter.
Returns:
The value that is placed into this.
"""
context = _assign_values_to_unbound_vars(self._unbound_vars, bindings)
context.update(self._partial_context)
return self._construct(context) | Constructs the graph and returns either a tensor or a sequence.
Args:
**bindings: Arguments for every deferred parameter.
Returns:
The value that is placed into this. | Below is the instruction that describes the task:
### Input:
Constructs the graph and returns either a tensor or a sequence.
Args:
**bindings: Arguments for every deferred parameter.
Returns:
The value that is placed into this.
### Response:
def construct(self, **bindings):
"""Constructs the graph and returns either a tensor or a sequence.
Args:
**bindings: Arguments for every deferred parameter.
Returns:
The value that is placed into this.
"""
context = _assign_values_to_unbound_vars(self._unbound_vars, bindings)
context.update(self._partial_context)
return self._construct(context) |
def get_student_current_grades(self, username, course_ids=None):
"""
Returns a CurrentGradesByUser object with the user current grades.
Args:
username (str): an edx user's username
course_ids (list): a list of edX course ids.
Returns:
CurrentGradesByUser: object representing the student current grades
"""
# if no course ids are provided, let's get the user enrollments
if course_ids is None:
enrollments_client = CourseEnrollments(self.requester, self.base_url)
enrollments = enrollments_client.get_student_enrollments()
course_ids = list(enrollments.get_enrolled_course_ids())
all_current_grades = []
for course_id in course_ids:
try:
all_current_grades.append(self.get_student_current_grade(username, course_id))
except HTTPError as error:
if error.response.status_code >= 500:
raise
return CurrentGradesByUser(all_current_grades) | Returns a CurrentGradesByUser object with the user current grades.
Args:
username (str): an edx user's username
course_ids (list): a list of edX course ids.
Returns:
CurrentGradesByUser: object representing the student current grades | Below is the instruction that describes the task:
### Input:
Returns a CurrentGradesByUser object with the user current grades.
Args:
username (str): an edx user's username
course_ids (list): a list of edX course ids.
Returns:
CurrentGradesByUser: object representing the student current grades
### Response:
def get_student_current_grades(self, username, course_ids=None):
"""
Returns a CurrentGradesByUser object with the user current grades.
Args:
username (str): an edx user's username
course_ids (list): a list of edX course ids.
Returns:
CurrentGradesByUser: object representing the student current grades
"""
# if no course ids are provided, let's get the user enrollments
if course_ids is None:
enrollments_client = CourseEnrollments(self.requester, self.base_url)
enrollments = enrollments_client.get_student_enrollments()
course_ids = list(enrollments.get_enrolled_course_ids())
all_current_grades = []
for course_id in course_ids:
try:
all_current_grades.append(self.get_student_current_grade(username, course_id))
except HTTPError as error:
if error.response.status_code >= 500:
raise
return CurrentGradesByUser(all_current_grades) |
def print_line(line):
"""
Print given line to stdout (i3bar).
"""
sys.__stdout__.write("{}\n".format(line))
sys.__stdout__.flush() | Print given line to stdout (i3bar). | Below is the instruction that describes the task:
### Input:
Print given line to stdout (i3bar).
### Response:
def print_line(line):
"""
Print given line to stdout (i3bar).
"""
sys.__stdout__.write("{}\n".format(line))
sys.__stdout__.flush() |
async def send_maps(self, map_list):
"""Sends a request to the server containing maps (dicts)."""
params = {
'VER': 8, # channel protocol version
'RID': 81188, # request identifier
'ctype': 'hangouts', # client type
}
if self._gsessionid_param is not None:
params['gsessionid'] = self._gsessionid_param
if self._sid_param is not None:
params['SID'] = self._sid_param
data_dict = dict(count=len(map_list), ofs=0)
for map_num, map_ in enumerate(map_list):
for map_key, map_val in map_.items():
data_dict['req{}_{}'.format(map_num, map_key)] = map_val
res = await self._session.fetch(
'post', CHANNEL_URL, params=params, data=data_dict
)
return res | Sends a request to the server containing maps (dicts). | Below is the instruction that describes the task:
### Input:
Sends a request to the server containing maps (dicts).
### Response:
async def send_maps(self, map_list):
"""Sends a request to the server containing maps (dicts)."""
params = {
'VER': 8, # channel protocol version
'RID': 81188, # request identifier
'ctype': 'hangouts', # client type
}
if self._gsessionid_param is not None:
params['gsessionid'] = self._gsessionid_param
if self._sid_param is not None:
params['SID'] = self._sid_param
data_dict = dict(count=len(map_list), ofs=0)
for map_num, map_ in enumerate(map_list):
for map_key, map_val in map_.items():
data_dict['req{}_{}'.format(map_num, map_key)] = map_val
res = await self._session.fetch(
'post', CHANNEL_URL, params=params, data=data_dict
)
return res |
def add_filename_pattern(self, dir_name, pattern):
"""
Adds a Unix shell-style wildcard pattern underneath the specified directory
:param dir_name: str: directory that contains the pattern
:param pattern: str: Unix shell-style wildcard pattern
"""
full_pattern = '{}{}{}'.format(dir_name, os.sep, pattern)
filename_regex = fnmatch.translate(full_pattern)
self.regex_list.append(re.compile(filename_regex)) | Adds a Unix shell-style wildcard pattern underneath the specified directory
:param dir_name: str: directory that contains the pattern
:param pattern: str: Unix shell-style wildcard pattern | Below is the instruction that describes the task:
### Input:
Adds a Unix shell-style wildcard pattern underneath the specified directory
:param dir_name: str: directory that contains the pattern
:param pattern: str: Unix shell-style wildcard pattern
### Response:
def add_filename_pattern(self, dir_name, pattern):
"""
Adds a Unix shell-style wildcard pattern underneath the specified directory
:param dir_name: str: directory that contains the pattern
:param pattern: str: Unix shell-style wildcard pattern
"""
full_pattern = '{}{}{}'.format(dir_name, os.sep, pattern)
filename_regex = fnmatch.translate(full_pattern)
self.regex_list.append(re.compile(filename_regex)) |
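A standalone sketch of the same fnmatch.translate technique; the directory name and pattern are invented:

import fnmatch, os, re

dir_name, pattern = 'results', '*.tmp'          # illustrative values
full_pattern = '{}{}{}'.format(dir_name, os.sep, pattern)
regex = re.compile(fnmatch.translate(full_pattern))

print(bool(regex.match(os.path.join('results', 'run1.tmp'))))   # True
print(bool(regex.match(os.path.join('results', 'run1.txt'))))   # False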
def proxyInit(self):
"""
To receive events the proxy has to tell the CCU / Homegear where to send the events. For that we call the init-method.
"""
# Call init() with local XML RPC config and interface_id (the name of
# the receiver) to receive events. XML RPC server has to be running.
for interface_id, proxy in self.proxies.items():
if proxy._skipinit:
continue
if proxy._callbackip and proxy._callbackport:
callbackip = proxy._callbackip
callbackport = proxy._callbackport
else:
callbackip = proxy._localip
callbackport = self._localport
LOG.debug("ServerThread.proxyInit: init('http://%s:%i', '%s')" %
(callbackip, callbackport, interface_id))
try:
proxy.init("http://%s:%i" %
(callbackip, callbackport), interface_id)
LOG.info("Proxy initialized")
except Exception as err:
LOG.debug("proxyInit: Exception: %s" % str(err))
LOG.warning("Failed to initialize proxy")
self.failed_inits.append(interface_id) | To receive events the proxy has to tell the CCU / Homegear where to send the events. For that we call the init-method. | Below is the instruction that describes the task:
### Input:
To receive events the proxy has to tell the CCU / Homegear where to send the events. For that we call the init-method.
### Response:
def proxyInit(self):
"""
To receive events the proxy has to tell the CCU / Homegear where to send the events. For that we call the init-method.
"""
# Call init() with local XML RPC config and interface_id (the name of
# the receiver) to receive events. XML RPC server has to be running.
for interface_id, proxy in self.proxies.items():
if proxy._skipinit:
continue
if proxy._callbackip and proxy._callbackport:
callbackip = proxy._callbackip
callbackport = proxy._callbackport
else:
callbackip = proxy._localip
callbackport = self._localport
LOG.debug("ServerThread.proxyInit: init('http://%s:%i', '%s')" %
(callbackip, callbackport, interface_id))
try:
proxy.init("http://%s:%i" %
(callbackip, callbackport), interface_id)
LOG.info("Proxy initialized")
except Exception as err:
LOG.debug("proxyInit: Exception: %s" % str(err))
LOG.warning("Failed to initialize proxy")
self.failed_inits.append(interface_id) |
async def add_shade_to_scene(self, shade_id, scene_id, position=None):
"""Add a shade to a scene."""
if position is None:
_shade = await self.get_shade(shade_id)
position = await _shade.get_current_position()
await (SceneMembers(self.request)).create_scene_member(
position, scene_id, shade_id
) | Add a shade to a scene. | Below is the instruction that describes the task:
### Input:
Add a shade to a scene.
### Response:
async def add_shade_to_scene(self, shade_id, scene_id, position=None):
"""Add a shade to a scene."""
if position is None:
_shade = await self.get_shade(shade_id)
position = await _shade.get_current_position()
await (SceneMembers(self.request)).create_scene_member(
position, scene_id, shade_id
) |
async def verify(self, message: bytes, signature: bytes, signer: str = None) -> bool:
"""
Verify signature with input signer verification key (via lookup by DID first if need be).
Raise WalletState if wallet is closed.
:param message: Content to sign, as bytes
:param signature: signature, as bytes
:param signer: signer DID or verification key; omit for anchor's own
:return: whether signature is valid
"""
LOGGER.debug('BaseAnchor.verify >>> signer: %s, message: %s, signature: %s', signer, message, signature)
if not self.wallet.handle:
LOGGER.debug('BaseAnchor.verify <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
verkey = None
if signer:
verkey = await self._verkey_for(signer)
rv = await self.wallet.verify(message, signature, verkey)
LOGGER.debug('BaseAnchor.verify <<< %s', rv)
return rv | Verify signature with input signer verification key (via lookup by DID first if need be).
Raise WalletState if wallet is closed.
:param message: Content to sign, as bytes
:param signature: signature, as bytes
:param signer: signer DID or verification key; omit for anchor's own
:return: whether signature is valid | Below is the instruction that describes the task:
### Input:
Verify signature with input signer verification key (via lookup by DID first if need be).
Raise WalletState if wallet is closed.
:param message: Content to sign, as bytes
:param signature: signature, as bytes
:param signer: signer DID or verification key; omit for anchor's own
:return: whether signature is valid
### Response:
async def verify(self, message: bytes, signature: bytes, signer: str = None) -> bool:
"""
Verify signature with input signer verification key (via lookup by DID first if need be).
Raise WalletState if wallet is closed.
:param message: Content to sign, as bytes
:param signature: signature, as bytes
:param signer: signer DID or verification key; omit for anchor's own
:return: whether signature is valid
"""
LOGGER.debug('BaseAnchor.verify >>> signer: %s, message: %s, signature: %s', signer, message, signature)
if not self.wallet.handle:
LOGGER.debug('BaseAnchor.verify <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
verkey = None
if signer:
verkey = await self._verkey_for(signer)
rv = await self.wallet.verify(message, signature, verkey)
LOGGER.debug('BaseAnchor.verify <<< %s', rv)
return rv |
def DbExportEvent(self, argin):
""" Export Event channel to database
:param argin: Str[0] = event channel name (or factory name)
Str[1] = CORBA IOR
Str[2] = Notifd host name
Str[3] = Notifd pid
Str[4] = Notifd version
:type: tango.DevVarStringArray
:return:
:rtype: tango.DevVoid """
self._log.debug("In DbExportEvent()")
if len(argin) < 5:
self.warn_stream("DataBase::db_export_event(): insufficient export info for event ")
th_exc(DB_IncorrectArguments,
"insufficient export info for event",
"DataBase::ExportEvent()")
event, IOR, host, pid, version = argin[:5]
event = replace_wildcard(event.lower())
self.db.export_event(event, IOR, host, pid, version) | Export Event channel to database
:param argin: Str[0] = event channel name (or factory name)
Str[1] = CORBA IOR
Str[2] = Notifd host name
Str[3] = Notifd pid
Str[4] = Notifd version
:type: tango.DevVarStringArray
:return:
:rtype: tango.DevVoid | Below is the instruction that describes the task:
### Input:
Export Event channel to database
:param argin: Str[0] = event channel name (or factory name)
Str[1] = CORBA IOR
Str[2] = Notifd host name
Str[3] = Notifd pid
Str[4] = Notifd version
:type: tango.DevVarStringArray
:return:
:rtype: tango.DevVoid
### Response:
def DbExportEvent(self, argin):
""" Export Event channel to database
:param argin: Str[0] = event channel name (or factory name)
Str[1] = CORBA IOR
Str[2] = Notifd host name
Str[3] = Notifd pid
Str[4] = Notifd version
:type: tango.DevVarStringArray
:return:
:rtype: tango.DevVoid """
self._log.debug("In DbExportEvent()")
if len(argin) < 5:
self.warn_stream("DataBase::db_export_event(): insufficient export info for event ")
th_exc(DB_IncorrectArguments,
"insufficient export info for event",
"DataBase::ExportEvent()")
event, IOR, host, pid, version = argin[:5]
event = replace_wildcard(event.lower())
self.db.export_event(event, IOR, host, pid, version) |
def target(key, full=True):
'''
Return the basename of a SysFS key path
:param key: the location to resolve within SysFS
:param full: full path instead of basename
:return: fullpath or basename of path
CLI example:
.. code-block:: bash
salt '*' sysfs.read class/ttyS0
'''
if not key.startswith('/sys'):
key = os.path.join('/sys', key)
key = os.path.realpath(key)
if not os.path.exists(key):
log.debug('Unknown SysFS key %s', key)
return False
elif full:
return key
else:
return os.path.basename(key) | Return the basename of a SysFS key path
:param key: the location to resolve within SysFS
:param full: full path instead of basename
:return: fullpath or basename of path
CLI example:
.. code-block:: bash
salt '*' sysfs.read class/ttyS0 | Below is the instruction that describes the task:
### Input:
Return the basename of a SysFS key path
:param key: the location to resolve within SysFS
:param full: full path instead of basename
:return: fullpath or basename of path
CLI example:
.. code-block:: bash
salt '*' sysfs.read class/ttyS0
### Response:
def target(key, full=True):
'''
Return the basename of a SysFS key path
:param key: the location to resolve within SysFS
:param full: full path instead of basename
:return: fullpath or basename of path
CLI example:
.. code-block:: bash
salt '*' sysfs.read class/ttyS0
'''
if not key.startswith('/sys'):
key = os.path.join('/sys', key)
key = os.path.realpath(key)
if not os.path.exists(key):
log.debug('Unkown SysFS key %s', key)
return False
elif full:
return key
else:
return os.path.basename(key) |
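target simply resolves a path with os.path.realpath and optionally reduces it to a basename. The POSIX-only sketch below demonstrates that resolution on a temporary symlink rather than a real /sys entry:

import os, tempfile

# Resolve a symlink and take either the full real path or its basename,
# mirroring the realpath()/basename() handling above (paths are temporary, not /sys).
root = os.path.realpath(tempfile.mkdtemp())
real = os.path.join(root, 'ttyS0')
os.mkdir(real)
link = os.path.join(root, 'serial0')
os.symlink(real, link)

resolved = os.path.realpath(link)
print(resolved == real)              # True: symlink resolved to its target
print(os.path.basename(resolved))    # ttyS0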
def m2i(self, pkt, s):
"""
ASN1F_SEQUENCE behaves transparently, with nested ASN1_objects being
dissected one by one. Because we use obj.dissect (see loop below)
instead of obj.m2i (as we trust dissect to do the appropriate set_vals)
we do not directly retrieve the list of nested objects.
Thus m2i returns an empty list (along with the proper remainder).
It is discarded by dissect() and should not be missed elsewhere.
"""
diff_tag, s = BER_tagging_dec(s, hidden_tag=self.ASN1_tag,
implicit_tag=self.implicit_tag,
explicit_tag=self.explicit_tag,
safe=self.flexible_tag)
if diff_tag is not None:
if self.implicit_tag is not None:
self.implicit_tag = diff_tag
elif self.explicit_tag is not None:
self.explicit_tag = diff_tag
codec = self.ASN1_tag.get_codec(pkt.ASN1_codec)
i, s, remain = codec.check_type_check_len(s)
if len(s) == 0:
for obj in self.seq:
obj.set_val(pkt, None)
else:
for obj in self.seq:
try:
s = obj.dissect(pkt, s)
except ASN1F_badsequence:
break
if len(s) > 0:
raise BER_Decoding_Error("unexpected remainder", remaining=s)
return [], remain | ASN1F_SEQUENCE behaves transparently, with nested ASN1_objects being
dissected one by one. Because we use obj.dissect (see loop below)
instead of obj.m2i (as we trust dissect to do the appropriate set_vals)
we do not directly retrieve the list of nested objects.
Thus m2i returns an empty list (along with the proper remainder).
It is discarded by dissect() and should not be missed elsewhere. | Below is the instruction that describes the task:
### Input:
ASN1F_SEQUENCE behaves transparently, with nested ASN1_objects being
dissected one by one. Because we use obj.dissect (see loop below)
instead of obj.m2i (as we trust dissect to do the appropriate set_vals)
we do not directly retrieve the list of nested objects.
Thus m2i returns an empty list (along with the proper remainder).
It is discarded by dissect() and should not be missed elsewhere.
### Response:
def m2i(self, pkt, s):
"""
ASN1F_SEQUENCE behaves transparently, with nested ASN1_objects being
dissected one by one. Because we use obj.dissect (see loop below)
instead of obj.m2i (as we trust dissect to do the appropriate set_vals)
we do not directly retrieve the list of nested objects.
Thus m2i returns an empty list (along with the proper remainder).
It is discarded by dissect() and should not be missed elsewhere.
"""
diff_tag, s = BER_tagging_dec(s, hidden_tag=self.ASN1_tag,
implicit_tag=self.implicit_tag,
explicit_tag=self.explicit_tag,
safe=self.flexible_tag)
if diff_tag is not None:
if self.implicit_tag is not None:
self.implicit_tag = diff_tag
elif self.explicit_tag is not None:
self.explicit_tag = diff_tag
codec = self.ASN1_tag.get_codec(pkt.ASN1_codec)
i, s, remain = codec.check_type_check_len(s)
if len(s) == 0:
for obj in self.seq:
obj.set_val(pkt, None)
else:
for obj in self.seq:
try:
s = obj.dissect(pkt, s)
except ASN1F_badsequence:
break
if len(s) > 0:
raise BER_Decoding_Error("unexpected remainder", remaining=s)
return [], remain |
def origin(hosts):
"""
Return a function that returns a valid HTTP Origin or localhost
if none found.
"""
hosts = [urlsplit(h) for h in hosts]
def func(environ):
if 'ISSO_CORS_ORIGIN' in environ:
return environ['ISSO_CORS_ORIGIN']
if not hosts:
return "http://invalid.local"
loc = environ.get("HTTP_ORIGIN", environ.get("HTTP_REFERER", None))
if loc is None:
return urljoin(*hosts[0])
for split in hosts:
if urlsplit(loc) == split:
return urljoin(*split)
else:
return urljoin(*hosts[0])
return func | Return a function that returns a valid HTTP Origin or localhost
if none found. | Below is the instruction that describes the task:
### Input:
Return a function that returns a valid HTTP Origin or localhost
if none found.
### Response:
def origin(hosts):
"""
Return a function that returns a valid HTTP Origin or localhost
if none found.
"""
hosts = [urlsplit(h) for h in hosts]
def func(environ):
if 'ISSO_CORS_ORIGIN' in environ:
return environ['ISSO_CORS_ORIGIN']
if not hosts:
return "http://invalid.local"
loc = environ.get("HTTP_ORIGIN", environ.get("HTTP_REFERER", None))
if loc is None:
return urljoin(*hosts[0])
for split in hosts:
if urlsplit(loc) == split:
return urljoin(*split)
else:
return urljoin(*hosts[0])
return func |
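The function above leans on a project-specific urljoin helper, so the standalone sketch below rebuilds the origin string directly with urllib.parse while keeping the same whitelist-with-fallback behaviour; the host names are invented.

from urllib.parse import urlsplit

def make_origin_checker(allowed_hosts):
    allowed = [urlsplit(h) for h in allowed_hosts]
    def check(environ):
        loc = environ.get("HTTP_ORIGIN", environ.get("HTTP_REFERER"))
        for split in allowed:
            if loc and urlsplit(loc)[:2] == split[:2]:   # same scheme and netloc
                return "%s://%s" % (split.scheme, split.netloc)
        # fall back to the first configured host
        return "%s://%s" % (allowed[0].scheme, allowed[0].netloc)
    return check

check = make_origin_checker(["https://example.org"])
print(check({"HTTP_ORIGIN": "https://example.org"}))   # https://example.org
print(check({"HTTP_ORIGIN": "https://evil.example"}))  # https://example.org (fallback)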
def joint_sfs_folded(ac1, ac2, n1=None, n2=None):
"""Compute the joint folded site frequency spectrum between two
populations.
Parameters
----------
ac1 : array_like, int, shape (n_variants, 2)
Allele counts for the first population.
ac2 : array_like, int, shape (n_variants, 2)
Allele counts for the second population.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded : ndarray, int, shape (n1//2 + 1, n2//2 + 1)
Array where the (i, j)th element is the number of variant sites with a
minor allele count of i in the first population and j in the second
population.
"""
# check inputs
ac1, n1 = _check_ac_n(ac1, n1)
ac2, n2 = _check_ac_n(ac2, n2)
# compute minor allele counts
mac1 = np.amin(ac1, axis=1)
mac2 = np.amin(ac2, axis=1)
# compute site frequency spectrum
x = n1//2 + 1
y = n2//2 + 1
tmp = (mac1 * y + mac2).astype(int, copy=False)
s = np.bincount(tmp)
s.resize(x, y)
return s | Compute the joint folded site frequency spectrum between two
populations.
Parameters
----------
ac1 : array_like, int, shape (n_variants, 2)
Allele counts for the first population.
ac2 : array_like, int, shape (n_variants, 2)
Allele counts for the second population.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded : ndarray, int, shape (n1//2 + 1, n2//2 + 1)
Array where the (i, j)th element is the number of variant sites with a
minor allele count of i in the first population and j in the second
population. | Below is the instruction that describes the task:
### Input:
Compute the joint folded site frequency spectrum between two
populations.
Parameters
----------
ac1 : array_like, int, shape (n_variants, 2)
Allele counts for the first population.
ac2 : array_like, int, shape (n_variants, 2)
Allele counts for the second population.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded : ndarray, int, shape (n1//2 + 1, n2//2 + 1)
Array where the (i, j)th element is the number of variant sites with a
minor allele count of i in the first population and j in the second
population.
### Response:
def joint_sfs_folded(ac1, ac2, n1=None, n2=None):
"""Compute the joint folded site frequency spectrum between two
populations.
Parameters
----------
ac1 : array_like, int, shape (n_variants, 2)
Allele counts for the first population.
ac2 : array_like, int, shape (n_variants, 2)
Allele counts for the second population.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded : ndarray, int, shape (n1//2 + 1, n2//2 + 1)
Array where the (i, j)th element is the number of variant sites with a
minor allele count of i in the first population and j in the second
population.
"""
# check inputs
ac1, n1 = _check_ac_n(ac1, n1)
ac2, n2 = _check_ac_n(ac2, n2)
# compute minor allele counts
mac1 = np.amin(ac1, axis=1)
mac2 = np.amin(ac2, axis=1)
# compute site frequency spectrum
x = n1//2 + 1
y = n2//2 + 1
tmp = (mac1 * y + mac2).astype(int, copy=False)
s = np.bincount(tmp)
s.resize(x, y)
return s |
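The index arithmetic above packs the two minor-allele counts into a single bin number before counting. A minimal self-contained sketch of that trick, with toy allele counts and n1 = n2 = 4 chromosomes (values are illustrative only):

import numpy as np

ac1 = np.array([[1, 3], [2, 2], [0, 4]])    # minor allele counts 1, 2, 0
ac2 = np.array([[3, 1], [4, 0], [2, 2]])    # minor allele counts 1, 0, 2
mac1 = np.amin(ac1, axis=1)
mac2 = np.amin(ac2, axis=1)
x = y = 4 // 2 + 1                          # 3 x 3 output grid
s = np.bincount(mac1 * y + mac2, minlength=x * y).reshape(x, y)
print(s)                                    # one variant each at (0, 2), (1, 1) and (2, 0)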
def wrsamp(self, expanded=False, write_dir=''):
"""
Write a wfdb header file and any associated dat files from this
object.
Parameters
----------
expanded : bool, optional
Whether to write the expanded signal (e_d_signal) instead
of the uniform signal (d_signal).
write_dir : str, optional
The directory in which to write the files.
"""
# Perform field validity and cohesion checks, and write the
# header file.
self.wrheader(write_dir=write_dir)
if self.n_sig > 0:
# Perform signal validity and cohesion checks, and write the
# associated dat files.
self.wr_dats(expanded=expanded, write_dir=write_dir) | Write a wfdb header file and any associated dat files from this
object.
Parameters
----------
expanded : bool, optional
Whether to write the expanded signal (e_d_signal) instead
of the uniform signal (d_signal).
write_dir : str, optional
The directory in which to write the files. | Below is the the instruction that describes the task:
### Input:
Write a wfdb header file and any associated dat files from this
object.
Parameters
----------
expanded : bool, optional
Whether to write the expanded signal (e_d_signal) instead
of the uniform signal (d_signal).
write_dir : str, optional
The directory in which to write the files.
### Response:
def wrsamp(self, expanded=False, write_dir=''):
"""
Write a wfdb header file and any associated dat files from this
object.
Parameters
----------
expanded : bool, optional
Whether to write the expanded signal (e_d_signal) instead
of the uniform signal (d_signal).
write_dir : str, optional
The directory in which to write the files.
"""
# Perform field validity and cohesion checks, and write the
# header file.
self.wrheader(write_dir=write_dir)
if self.n_sig > 0:
# Perform signal validity and cohesion checks, and write the
# associated dat files.
self.wr_dats(expanded=expanded, write_dir=write_dir) |
def _parse_positional_arguments(self, argv):
"""Parse the positional arguments part of an argument list.
argv <list str>:
List of arguments. Will be altered.
"""
for posarg in self.positional_args:
posarg.parse(argv)
if argv:
if None in [p.nargs for p in self.positional_args]:
msg = '%s too many argument%s given'
plural_s = len(argv) > 1 and 's' or ''
raise BadNumberOfArguments(message=msg % (len(argv), plural_s))
msg = 'This program accepts exactly %s positional arguments (%s given).'
required = len([p.nargs for p in self.positional_args])
raise BadNumberOfArguments(message=msg % (required, required + len(argv))) | Parse the positional arguments part of an argument list.
argv <list str>:
List of arguments. Will be altered. | Below is the the instruction that describes the task:
### Input:
Parse the positional arguments part of an argument list.
argv <list str>:
List of arguments. Will be altered.
### Response:
def _parse_positional_arguments(self, argv):
"""Parse the positional arguments part of an argument list.
argv <list str>:
List of arguments. Will be altered.
"""
for posarg in self.positional_args:
posarg.parse(argv)
if argv:
if None in [p.nargs for p in self.positional_args]:
msg = '%s too many argument%s given'
plural_s = len(argv) > 1 and 's' or ''
raise BadNumberOfArguments(message=msg % (len(argv), plural_s))
msg = 'This program accepts exactly %s positional arguments (%s given).'
required = len([p.nargs for p in self.positional_args])
raise BadNumberOfArguments(message=msg % (required, required + len(argv))) |
def ensure_timezone(func, argname, arg):
"""Argument preprocessor that converts the input into a tzinfo object.
Examples
--------
>>> from zipline.utils.preprocess import preprocess
>>> @preprocess(tz=ensure_timezone)
... def foo(tz):
... return tz
>>> foo('utc')
<UTC>
"""
if isinstance(arg, tzinfo):
return arg
if isinstance(arg, string_types):
return timezone(arg)
raise TypeError(
"{func}() couldn't convert argument "
"{argname}={arg!r} to a timezone.".format(
func=_qualified_name(func),
argname=argname,
arg=arg,
),
) | Argument preprocessor that converts the input into a tzinfo object.
Examples
--------
>>> from zipline.utils.preprocess import preprocess
>>> @preprocess(tz=ensure_timezone)
... def foo(tz):
... return tz
>>> foo('utc')
<UTC> | Below is the the instruction that describes the task:
### Input:
Argument preprocessor that converts the input into a tzinfo object.
Examples
--------
>>> from zipline.utils.preprocess import preprocess
>>> @preprocess(tz=ensure_timezone)
... def foo(tz):
... return tz
>>> foo('utc')
<UTC>
### Response:
def ensure_timezone(func, argname, arg):
"""Argument preprocessor that converts the input into a tzinfo object.
Examples
--------
>>> from zipline.utils.preprocess import preprocess
>>> @preprocess(tz=ensure_timezone)
... def foo(tz):
... return tz
>>> foo('utc')
<UTC>
"""
if isinstance(arg, tzinfo):
return arg
if isinstance(arg, string_types):
return timezone(arg)
raise TypeError(
"{func}() couldn't convert argument "
"{argname}={arg!r} to a timezone.".format(
func=_qualified_name(func),
argname=argname,
arg=arg,
),
) |
def add_index_argument(cls, group):
"""
Subclasses may call this to add an index argument.
Args:
group: argparse.ArgumentGroup, the extension argument group
prefix: str, arguments have to be namespaced
"""
prefix = cls.argument_prefix
group.add_argument(
'--%s-index' % prefix, action="store",
dest="%s_index" % prefix,
help=("Name of the %s root markdown file, can be None" % (
cls.extension_name))) | Subclasses may call this to add an index argument.
Args:
group: argparse.ArgumentGroup, the extension argument group
prefix: str, arguments have to be namespaced | Below is the the instruction that describes the task:
### Input:
Subclasses may call this to add an index argument.
Args:
group: argparse.ArgumentGroup, the extension argument group
prefix: str, arguments have to be namespaced
### Response:
def add_index_argument(cls, group):
"""
Subclasses may call this to add an index argument.
Args:
group: argparse.ArgumentGroup, the extension argument group
prefix: str, arguments have to be namespaced
"""
prefix = cls.argument_prefix
group.add_argument(
'--%s-index' % prefix, action="store",
dest="%s_index" % prefix,
help=("Name of the %s root markdown file, can be None" % (
cls.extension_name))) |
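A rough, self-contained illustration of the option this helper generates, using plain argparse and an invented 'gi' prefix (the real extension classes are not shown here):

import argparse

parser = argparse.ArgumentParser()
group = parser.add_argument_group('gi extension')   # stand-in for the extension group
group.add_argument('--gi-index', action='store', dest='gi_index',
                   help='Name of the gi root markdown file, can be None')
args = parser.parse_args(['--gi-index', 'gi-index.md'])
print(args.gi_index)                                # gi-index.md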
def goals_by_version(self):
"""Goals organized into three tuples by whether they are v1, ambiguous, or v2 goals (respectively).
It's possible for a goal to be implemented with both v1 and v2, in which case a consumer
should use the `--v1` and `--v2` global flags to disambiguate.
"""
v1, ambiguous, v2 = [], [], []
for goal in self._goals:
goal_dot = '{}.'.format(goal)
scope_categories = {s.category
for s in self.known_scope_to_info.values()
if s.scope == goal or s.scope.startswith(goal_dot)}
is_v1 = ScopeInfo.TASK in scope_categories
is_v2 = ScopeInfo.GOAL in scope_categories
if is_v1 and is_v2:
ambiguous.append(goal)
elif is_v1:
v1.append(goal)
else:
v2.append(goal)
return tuple(v1), tuple(ambiguous), tuple(v2) | Goals organized into three tuples by whether they are v1, ambiguous, or v2 goals (respectively).
It's possible for a goal to be implemented with both v1 and v2, in which case a consumer
should use the `--v1` and `--v2` global flags to disambiguate. | Below is the the instruction that describes the task:
### Input:
Goals organized into three tuples by whether they are v1, ambiguous, or v2 goals (respectively).
It's possible for a goal to be implemented with both v1 and v2, in which case a consumer
should use the `--v1` and `--v2` global flags to disambiguate.
### Response:
def goals_by_version(self):
"""Goals organized into three tuples by whether they are v1, ambiguous, or v2 goals (respectively).
It's possible for a goal to be implemented with both v1 and v2, in which case a consumer
should use the `--v1` and `--v2` global flags to disambiguate.
"""
v1, ambiguous, v2 = [], [], []
for goal in self._goals:
goal_dot = '{}.'.format(goal)
scope_categories = {s.category
for s in self.known_scope_to_info.values()
if s.scope == goal or s.scope.startswith(goal_dot)}
is_v1 = ScopeInfo.TASK in scope_categories
is_v2 = ScopeInfo.GOAL in scope_categories
if is_v1 and is_v2:
ambiguous.append(goal)
elif is_v1:
v1.append(goal)
else:
v2.append(goal)
return tuple(v1), tuple(ambiguous), tuple(v2) |
def display_data_item(self, data_item: DataItem, source_display_panel=None, source_data_item=None):
"""Display a new data item and gives it keyboard focus. Uses existing display if it is already displayed.
.. versionadded:: 1.0
Status: Provisional
Scriptable: Yes
"""
for display_panel in self.__document_controller.workspace_controller.display_panels:
if display_panel.data_item == data_item._data_item:
display_panel.request_focus()
return DisplayPanel(display_panel)
result_display_panel = self.__document_controller.next_result_display_panel()
if result_display_panel:
display_item = self.__document_controller.document_model.get_display_item_for_data_item(data_item._data_item)
result_display_panel.set_display_panel_display_item(display_item)
result_display_panel.request_focus()
return DisplayPanel(result_display_panel)
return None | Display a new data item and gives it keyboard focus. Uses existing display if it is already displayed.
.. versionadded:: 1.0
Status: Provisional
Scriptable: Yes | Below is the the instruction that describes the task:
### Input:
Display a new data item and gives it keyboard focus. Uses existing display if it is already displayed.
.. versionadded:: 1.0
Status: Provisional
Scriptable: Yes
### Response:
def display_data_item(self, data_item: DataItem, source_display_panel=None, source_data_item=None):
"""Display a new data item and gives it keyboard focus. Uses existing display if it is already displayed.
.. versionadded:: 1.0
Status: Provisional
Scriptable: Yes
"""
for display_panel in self.__document_controller.workspace_controller.display_panels:
if display_panel.data_item == data_item._data_item:
display_panel.request_focus()
return DisplayPanel(display_panel)
result_display_panel = self.__document_controller.next_result_display_panel()
if result_display_panel:
display_item = self.__document_controller.document_model.get_display_item_for_data_item(data_item._data_item)
result_display_panel.set_display_panel_display_item(display_item)
result_display_panel.request_focus()
return DisplayPanel(result_display_panel)
return None |
def configure_dot_code(self, info):
""" Handles display of the dot code in a text editor.
"""
if not info.initialized:
return
self.dot_code = str(self.model)
retval = self.edit_traits( parent = info.ui.control,
kind = "livemodal",
view = "dot_code_view" ) | Handles display of the dot code in a text editor. | Below is the the instruction that describes the task:
### Input:
Handles display of the dot code in a text editor.
### Response:
def configure_dot_code(self, info):
""" Handles display of the dot code in a text editor.
"""
if not info.initialized:
return
self.dot_code = str(self.model)
retval = self.edit_traits( parent = info.ui.control,
kind = "livemodal",
view = "dot_code_view" ) |
def output_barplot(df, figformat, path, title=None, palette=None):
"""Create barplots based on number of reads and total sum of nucleotides sequenced."""
logging.info("Nanoplotter: Creating barplots for number of reads and total throughput.")
read_count = Plot(path=path + "NanoComp_number_of_reads." + figformat,
title="Comparing number of reads")
ax = sns.countplot(x="dataset",
data=df,
palette=palette)
ax.set(ylabel='Number of reads',
title=title or read_count.title)
plt.xticks(rotation=30, ha='center')
read_count.fig = ax.get_figure()
read_count.save(format=figformat)
plt.close("all")
throughput_bases = Plot(path=path + "NanoComp_total_throughput." + figformat,
title="Comparing throughput in gigabases")
if "aligned_lengths" in df:
throughput = df.groupby('dataset')['aligned_lengths'].sum()
ylabel = 'Total gigabase aligned'
else:
throughput = df.groupby('dataset')['lengths'].sum()
ylabel = 'Total gigabase sequenced'
ax = sns.barplot(x=list(throughput.index),
y=throughput / 1e9,
palette=palette,
order=df["dataset"].unique())
ax.set(ylabel=ylabel,
title=title or throughput_bases.title)
plt.xticks(rotation=30, ha='center')
throughput_bases.fig = ax.get_figure()
throughput_bases.save(format=figformat)
plt.close("all")
return read_count, throughput_bases | Create barplots based on number of reads and total sum of nucleotides sequenced. | Below is the the instruction that describes the task:
### Input:
Create barplots based on number of reads and total sum of nucleotides sequenced.
### Response:
def output_barplot(df, figformat, path, title=None, palette=None):
"""Create barplots based on number of reads and total sum of nucleotides sequenced."""
logging.info("Nanoplotter: Creating barplots for number of reads and total throughput.")
read_count = Plot(path=path + "NanoComp_number_of_reads." + figformat,
title="Comparing number of reads")
ax = sns.countplot(x="dataset",
data=df,
palette=palette)
ax.set(ylabel='Number of reads',
title=title or read_count.title)
plt.xticks(rotation=30, ha='center')
read_count.fig = ax.get_figure()
read_count.save(format=figformat)
plt.close("all")
throughput_bases = Plot(path=path + "NanoComp_total_throughput." + figformat,
title="Comparing throughput in gigabases")
if "aligned_lengths" in df:
throughput = df.groupby('dataset')['aligned_lengths'].sum()
ylabel = 'Total gigabase aligned'
else:
throughput = df.groupby('dataset')['lengths'].sum()
ylabel = 'Total gigabase sequenced'
ax = sns.barplot(x=list(throughput.index),
y=throughput / 1e9,
palette=palette,
order=df["dataset"].unique())
ax.set(ylabel=ylabel,
title=title or throughput_bases.title)
plt.xticks(rotation=30, ha='center')
throughput_bases.fig = ax.get_figure()
throughput_bases.save(format=figformat)
plt.close("all")
return read_count, throughput_bases |
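The throughput aggregation can be checked on a toy frame (numbers invented); only the pandas part is shown, since the Plot wrapper belongs to the surrounding package:

import pandas as pd

df = pd.DataFrame({"dataset": ["run1", "run1", "run2"],
                   "lengths": [2_000_000_000, 1_000_000_000, 4_000_000_000]})
throughput = df.groupby("dataset")["lengths"].sum()
print(throughput / 1e9)        # run1 -> 3.0 Gbase, run2 -> 4.0 Gbase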
def indexOf(a, b):
"Return the first index of b in a."
for i, j in enumerate(a):
if j == b:
return i
else:
raise ValueError('sequence.index(x): x not in sequence') | Return the first index of b in a. | Below is the the instruction that describes the task:
### Input:
Return the first index of b in a.
### Response:
def indexOf(a, b):
"Return the first index of b in a."
for i, j in enumerate(a):
if j == b:
return i
else:
raise ValueError('sequence.index(x): x not in sequence') |
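Quick usage of the helper defined above:

print(indexOf("mississippi", "s"))    # 2
print(indexOf([10, 20, 30], 30))      # 2
try:
    indexOf([10, 20, 30], 99)
except ValueError as exc:
    print(exc)                        # sequence.index(x): x not in sequence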
def _set_maps(self, v, load=False):
"""
Setter method for maps, mapped from YANG variable /rbridge_id/maps (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_maps is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_maps() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=maps.maps, is_container='container', presence=False, yang_name="maps", rest_name="maps", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-add-mode': None, u'cli-full-command': None, u'info': u'All MAPS mode related commands.', u'cli-suppress-no': None, u'cli-mode-name': u'config-rbridge-id-$(rbridge-id)-maps'}}, namespace='urn:brocade.com:mgmt:brocade-maps', defining_module='brocade-maps', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """maps must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=maps.maps, is_container='container', presence=False, yang_name="maps", rest_name="maps", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-add-mode': None, u'cli-full-command': None, u'info': u'All MAPS mode related commands.', u'cli-suppress-no': None, u'cli-mode-name': u'config-rbridge-id-$(rbridge-id)-maps'}}, namespace='urn:brocade.com:mgmt:brocade-maps', defining_module='brocade-maps', yang_type='container', is_config=True)""",
})
self.__maps = t
if hasattr(self, '_set'):
self._set() | Setter method for maps, mapped from YANG variable /rbridge_id/maps (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_maps is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_maps() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for maps, mapped from YANG variable /rbridge_id/maps (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_maps is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_maps() directly.
### Response:
def _set_maps(self, v, load=False):
"""
Setter method for maps, mapped from YANG variable /rbridge_id/maps (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_maps is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_maps() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=maps.maps, is_container='container', presence=False, yang_name="maps", rest_name="maps", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-add-mode': None, u'cli-full-command': None, u'info': u'All MAPS mode related commands.', u'cli-suppress-no': None, u'cli-mode-name': u'config-rbridge-id-$(rbridge-id)-maps'}}, namespace='urn:brocade.com:mgmt:brocade-maps', defining_module='brocade-maps', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """maps must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=maps.maps, is_container='container', presence=False, yang_name="maps", rest_name="maps", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-add-mode': None, u'cli-full-command': None, u'info': u'All MAPS mode related commands.', u'cli-suppress-no': None, u'cli-mode-name': u'config-rbridge-id-$(rbridge-id)-maps'}}, namespace='urn:brocade.com:mgmt:brocade-maps', defining_module='brocade-maps', yang_type='container', is_config=True)""",
})
self.__maps = t
if hasattr(self, '_set'):
self._set() |
def received(self):
"""
Combined :class:`~charms.reactive.endpoints.JSONUnitDataView` of the
data of all units in this list, with automatic JSON decoding.
"""
if not hasattr(self, '_data'):
# NB: units are reversed so that lowest numbered unit takes precedence
self._data = JSONUnitDataView({key: value
for unit in reversed(self)
for key, value in unit.received_raw.items()})
return self._data | Combined :class:`~charms.reactive.endpoints.JSONUnitDataView` of the
data of all units in this list, with automatic JSON decoding. | Below is the the instruction that describes the task:
### Input:
Combined :class:`~charms.reactive.endpoints.JSONUnitDataView` of the
data of all units in this list, with automatic JSON decoding.
### Response:
def received(self):
"""
Combined :class:`~charms.reactive.endpoints.JSONUnitDataView` of the
data of all units in this list, with automatic JSON decoding.
"""
if not hasattr(self, '_data'):
# NB: units are reversed so that lowest numbered unit takes precedence
self._data = JSONUnitDataView({key: value
for unit in reversed(self)
for key, value in unit.received_raw.items()})
return self._data |
def keep_only_digits(s):
r'''
local helper to just keep digits
'''
fs = ''
for c in s:
if c.isdigit():
fs += c
return int(fs) | r'''
local helper to just keep digits | Below is the the instruction that describes the task:
### Input:
r'''
local helper to just keep digits
### Response:
def keep_only_digits(s):
r'''
local helper to just keep digits
'''
fs = ''
for c in s:
if c.isdigit():
fs += c
return int(fs) |
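For example, assuming the helper above is in scope:

print(keep_only_digits("12 days, 3:45"))   # 12345
print(keep_only_digits("answer = 42"))     # 42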
def createZone(self, zone, zoneFile=None, callback=None, errback=None,
**kwargs):
"""
Create a new zone, and return an associated high level Zone object.
Several optional keyword arguments are available to configure the SOA
record.
If zoneFile is specified, upload the specific zone definition file
to populate the zone with.
:param str zone: zone name, like 'example.com'
:param str zoneFile: absolute path of a zone file
:keyword int retry: retry time
:keyword int refresh: refresh ttl
:keyword int expiry: expiry ttl
:keyword int nx_ttl: nxdomain TTL
:rtype: :py:class:`ns1.zones.Zone`
"""
import ns1.zones
zone = ns1.zones.Zone(self.config, zone)
return zone.create(zoneFile=zoneFile, callback=callback,
errback=errback, **kwargs) | Create a new zone, and return an associated high level Zone object.
Several optional keyword arguments are available to configure the SOA
record.
If zoneFile is specified, upload the specific zone definition file
to populate the zone with.
:param str zone: zone name, like 'example.com'
:param str zoneFile: absolute path of a zone file
:keyword int retry: retry time
:keyword int refresh: refresh ttl
:keyword int expiry: expiry ttl
:keyword int nx_ttl: nxdomain TTL
:rtype: :py:class:`ns1.zones.Zone` | Below is the the instruction that describes the task:
### Input:
Create a new zone, and return an associated high level Zone object.
Several optional keyword arguments are available to configure the SOA
record.
If zoneFile is specified, upload the specific zone definition file
to populate the zone with.
:param str zone: zone name, like 'example.com'
:param str zoneFile: absolute path of a zone file
:keyword int retry: retry time
:keyword int refresh: refresh ttl
:keyword int expiry: expiry ttl
:keyword int nx_ttl: nxdomain TTL
:rtype: :py:class:`ns1.zones.Zone`
### Response:
def createZone(self, zone, zoneFile=None, callback=None, errback=None,
**kwargs):
"""
Create a new zone, and return an associated high level Zone object.
Several optional keyword arguments are available to configure the SOA
record.
If zoneFile is specified, upload the specific zone definition file
to populate the zone with.
:param str zone: zone name, like 'example.com'
:param str zoneFile: absolute path of a zone file
:keyword int retry: retry time
:keyword int refresh: refresh ttl
:keyword int expiry: expiry ttl
:keyword int nx_ttl: nxdomain TTL
:rtype: :py:class:`ns1.zones.Zone`
"""
import ns1.zones
zone = ns1.zones.Zone(self.config, zone)
return zone.create(zoneFile=zoneFile, callback=callback,
errback=errback, **kwargs) |
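A hedged usage sketch against the NS1 client this method appears to belong to; the import path and constructor are assumptions based on the ns1-python client, and the API key and zone name are placeholders:

from ns1 import NS1                        # assumed client entry point

api = NS1(apiKey="my-placeholder-key")
zone = api.createZone("example.com", nx_ttl=3600, refresh=86400)
print(zone)                                # high-level ns1.zones.Zone object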
def setPenColor(self, color):
"""
Sets the pen for this node.
:param color <QColor> || None
"""
color = QColor(color)
if self._palette is None:
self._palette = XNodePalette(self._scenePalette)
self._palette.setColor(self._palette.NodeForeground, color)
self.setDirty() | Sets the pen for this node.
:param color <QColor> || None | Below is the the instruction that describes the task:
### Input:
Sets the pen for this node.
:param color <QColor> || None
### Response:
def setPenColor(self, color):
"""
Sets the pen for this node.
:param color <QColor> || None
"""
color = QColor(color)
if self._palette is None:
self._palette = XNodePalette(self._scenePalette)
self._palette.setColor(self._palette.NodeForeground, color)
self.setDirty() |
def _get_taxids(self, taxids=None):
"""Return user-specified taxids or taxids in self.taxid2asscs"""
taxid_keys = set(self.taxid2asscs.keys())
return taxid_keys if taxids is None else set(taxids).intersection(taxid_keys) | Return user-specified taxids or taxids in self.taxid2asscs | Below is the the instruction that describes the task:
### Input:
Return user-specified taxids or taxids in self.taxid2asscs
### Response:
def _get_taxids(self, taxids=None):
"""Return user-specified taxids or taxids in self.taxid2asscs"""
taxid_keys = set(self.taxid2asscs.keys())
return taxid_keys if taxids is None else set(taxids).intersection(taxid_keys) |
def _get_dbid2goids(associations):
"""Return gene2go data for user-specified taxids."""
id2gos = cx.defaultdict(set)
for ntd in associations:
id2gos[ntd.DB_ID].add(ntd.GO_ID)
return dict(id2gos) | Return gene2go data for user-specified taxids. | Below is the the instruction that describes the task:
### Input:
Return gene2go data for user-specified taxids.
### Response:
def _get_dbid2goids(associations):
"""Return gene2go data for user-specified taxids."""
id2gos = cx.defaultdict(set)
for ntd in associations:
id2gos[ntd.DB_ID].add(ntd.GO_ID)
return dict(id2gos) |
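The grouping is easy to mimic with namedtuples standing in for the association records (IDs below are fabricated):

import collections as cx

Assc = cx.namedtuple("Assc", "DB_ID GO_ID")
associations = [Assc("P12345", "GO:0008150"), Assc("P12345", "GO:0003674"),
                Assc("Q67890", "GO:0008150")]
id2gos = cx.defaultdict(set)
for ntd in associations:
    id2gos[ntd.DB_ID].add(ntd.GO_ID)
print(dict(id2gos))    # {'P12345': {'GO:0008150', 'GO:0003674'}, 'Q67890': {'GO:0008150'}}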
def add_var_arg(self,arg_index):
"""
Add a command to the submit file to allow variable (macro) arguments
to be passed to the executable.
"""
try:
self.__var_args[arg_index]
except IndexError:
if arg_index != self.__arg_index:
raise CondorDAGJobError("mismatch between job and node var_arg index")
self.__var_args.append('$(macroargument%s)' % str(arg_index))
self.add_arg(self.__var_args[self.__arg_index])
self.__arg_index += 1 | Add a command to the submit file to allow variable (macro) arguments
to be passed to the executable. | Below is the the instruction that describes the task:
### Input:
Add a command to the submit file to allow variable (macro) arguments
to be passed to the executable.
### Response:
def add_var_arg(self,arg_index):
"""
Add a command to the submit file to allow variable (macro) arguments
to be passed to the executable.
"""
try:
self.__var_args[arg_index]
except IndexError:
if arg_index != self.__arg_index:
raise CondorDAGJobError("mismatch between job and node var_arg index")
self.__var_args.append('$(macroargument%s)' % str(arg_index))
self.add_arg(self.__var_args[self.__arg_index])
self.__arg_index += 1 |
def instruction_in_grid(self, instruction):
"""Returns an `InstructionInGrid` object for the `instruction`"""
row_position = self._rows_in_grid[instruction.row].xy
x = instruction.index_of_first_consumed_mesh_in_row
position = Point(row_position.x + x, row_position.y)
return InstructionInGrid(instruction, position) | Returns an `InstructionInGrid` object for the `instruction` | Below is the the instruction that describes the task:
### Input:
Returns an `InstructionInGrid` object for the `instruction`
### Response:
def instruction_in_grid(self, instruction):
"""Returns an `InstructionInGrid` object for the `instruction`"""
row_position = self._rows_in_grid[instruction.row].xy
x = instruction.index_of_first_consumed_mesh_in_row
position = Point(row_position.x + x, row_position.y)
return InstructionInGrid(instruction, position) |
def from_properties(cls, angle, axis, invert, translation):
"""Initialize a transformation based on the properties"""
rot = Rotation.from_properties(angle, axis, invert)
return Complete(rot.r, translation) | Initialize a transformation based on the properties | Below is the the instruction that describes the task:
### Input:
Initialize a transformation based on the properties
### Response:
def from_properties(cls, angle, axis, invert, translation):
"""Initialize a transformation based on the properties"""
rot = Rotation.from_properties(angle, axis, invert)
return Complete(rot.r, translation) |
def datetime_utc_to_local(utc):
"""
An ugly hack to convert naive :std:`datetime.datetime` object containing
UTC time to a naive :std:`datetime.datetime` object with local time.
It seems standard Python 2.3 library doesn't provide any better way to
do that.
"""
# pylint: disable-msg=C0103
ts = time.time()
cur = datetime.datetime.fromtimestamp(ts)
cur_utc = datetime.datetime.utcfromtimestamp(ts)
offset = cur - cur_utc
t = utc
d = datetime.timedelta(hours = 2)
while d > _MINUTE:
local = t + offset
tm = local.timetuple()
tm = tm[0:8] + (0, )
ts = time.mktime(tm)
u = datetime.datetime.utcfromtimestamp(ts)
diff = u - utc
if diff < _MINUTE and diff > -_MINUTE:
break
if diff > _NULLDELTA:
offset -= d
else:
offset += d
d //= 2
return local | An ugly hack to convert naive :std:`datetime.datetime` object containing
UTC time to a naive :std:`datetime.datetime` object with local time.
It seems standard Python 2.3 library doesn't provide any better way to
do that. | Below is the the instruction that describes the task:
### Input:
An ugly hack to convert naive :std:`datetime.datetime` object containing
UTC time to a naive :std:`datetime.datetime` object with local time.
It seems standard Python 2.3 library doesn't provide any better way to
do that.
### Response:
def datetime_utc_to_local(utc):
"""
An ugly hack to convert naive :std:`datetime.datetime` object containing
UTC time to a naive :std:`datetime.datetime` object with local time.
It seems standard Python 2.3 library doesn't provide any better way to
do that.
"""
# pylint: disable-msg=C0103
ts = time.time()
cur = datetime.datetime.fromtimestamp(ts)
cur_utc = datetime.datetime.utcfromtimestamp(ts)
offset = cur - cur_utc
t = utc
d = datetime.timedelta(hours = 2)
while d > _MINUTE:
local = t + offset
tm = local.timetuple()
tm = tm[0:8] + (0, )
ts = time.mktime(tm)
u = datetime.datetime.utcfromtimestamp(ts)
diff = u - utc
if diff < _MINUTE and diff > -_MINUTE:
break
if diff > _NULLDELTA:
offset -= d
else:
offset += d
d //= 2
return local |
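On Python 3 the same conversion is usually done with timezone-aware datetimes; a sketch for comparison (the sample timestamp is arbitrary):

from datetime import datetime, timezone

utc_naive = datetime(2021, 6, 1, 12, 0, 0)              # naive value known to be UTC
local_naive = (utc_naive.replace(tzinfo=timezone.utc)   # mark it as UTC
               .astimezone()                            # convert to the local zone
               .replace(tzinfo=None))                   # drop tzinfo to stay naive
print(local_naive)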
def _translate_cond(self, c): #pylint:disable=no-self-use
"""
Checks whether this condition can be supported by FastMemory."
"""
if isinstance(c, claripy.ast.Base) and not c.singlevalued:
raise SimFastMemoryError("size not supported")
if c is None:
return True
else:
return self.state.solver.eval_upto(c, 1)[0] | Checks whether this condition can be supported by FastMemory." | Below is the the instruction that describes the task:
### Input:
Checks whether this condition can be supported by FastMemory."
### Response:
def _translate_cond(self, c): #pylint:disable=no-self-use
"""
Checks whether this condition can be supported by FastMemory."
"""
if isinstance(c, claripy.ast.Base) and not c.singlevalued:
raise SimFastMemoryError("size not supported")
if c is None:
return True
else:
return self.state.solver.eval_upto(c, 1)[0] |
def represented_args(args, separator=" "):
"""
Args:
args (list | tuple | None): Arguments to represent
separator (str | unicode): Separator to use
Returns:
(str): Quoted as needed textual representation
"""
result = []
if args:
for text in args:
result.append(quoted(short(text)))
return separator.join(result) | Args:
args (list | tuple | None): Arguments to represent
separator (str | unicode): Separator to use
Returns:
(str): Quoted as needed textual representation | Below is the the instruction that describes the task:
### Input:
Args:
args (list | tuple | None): Arguments to represent
separator (str | unicode): Separator to use
Returns:
(str): Quoted as needed textual representation
### Response:
def represented_args(args, separator=" "):
"""
Args:
args (list | tuple | None): Arguments to represent
separator (str | unicode): Separator to use
Returns:
(str): Quoted as needed textual representation
"""
result = []
if args:
for text in args:
result.append(quoted(short(text)))
return separator.join(result) |
def get_function(pkgpath):
"""Take a full path to a python method or class, for example
mypkg.subpkg.method and return the method or class (after importing the
required packages)
"""
# Extract the module and function name from pkgpath
elems = pkgpath.split('.')
if len(elems) <= 1:
raise PyMacaronCoreException("Path %s is too short. Should be at least module.func." % elems)
func_name = elems[-1]
func_module = '.'.join(elems[0:-1])
# Load the function's module and get the function
try:
m = import_module(func_module)
f = getattr(m, func_name)
return f
except Exception as e:
t = traceback.format_exc()
raise PyMacaronCoreException("Failed to import %s: %s\nTrace:\n%s" % (pkgpath, str(e), t)) | Take a full path to a python method or class, for example
mypkg.subpkg.method and return the method or class (after importing the
required packages) | Below is the the instruction that describes the task:
### Input:
Take a full path to a python method or class, for example
mypkg.subpkg.method and return the method or class (after importing the
required packages)
### Response:
def get_function(pkgpath):
"""Take a full path to a python method or class, for example
mypkg.subpkg.method and return the method or class (after importing the
required packages)
"""
# Extract the module and function name from pkgpath
elems = pkgpath.split('.')
if len(elems) <= 1:
raise PyMacaronCoreException("Path %s is too short. Should be at least module.func." % elems)
func_name = elems[-1]
func_module = '.'.join(elems[0:-1])
# Load the function's module and get the function
try:
m = import_module(func_module)
f = getattr(m, func_name)
return f
except Exception as e:
t = traceback.format_exc()
raise PyMacaronCoreException("Failed to import %s: %s\nTrace:\n%s" % (pkgpath, str(e), t)) |
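Assuming the helper above is in scope, together with the module-level imports it relies on, usage looks like:

from importlib import import_module    # used inside get_function
import traceback                       # used in its error path

sqrt = get_function("math.sqrt")
print(sqrt(16))                        # 4.0
join = get_function("os.path.join")
print(join("tmp", "file.txt"))         # tmp/file.txt (tmp\file.txt on Windows)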
def _learner_distributed(learn:Learner, cuda_id:int, cache_dir:PathOrStr='tmp'):
"Put `learn` on distributed training with `cuda_id`."
learn.callbacks.append(DistributedTrainer(learn, cuda_id))
learn.callbacks.append(DistributedRecorder(learn, cuda_id, cache_dir))
return learn | Put `learn` on distributed training with `cuda_id`. | Below is the the instruction that describes the task:
### Input:
Put `learn` on distributed training with `cuda_id`.
### Response:
def _learner_distributed(learn:Learner, cuda_id:int, cache_dir:PathOrStr='tmp'):
"Put `learn` on distributed training with `cuda_id`."
learn.callbacks.append(DistributedTrainer(learn, cuda_id))
learn.callbacks.append(DistributedRecorder(learn, cuda_id, cache_dir))
return learn |
def class_method(cls, f):
"""Decorator which dynamically binds class methods to the model for later use."""
setattr(cls, f.__name__, classmethod(f))
return f | Decorator which dynamically binds class methods to the model for later use. | Below is the the instruction that describes the task:
### Input:
Decorator which dynamically binds class methods to the model for later use.
### Response:
def class_method(cls, f):
"""Decorator which dynamically binds class methods to the model for later use."""
setattr(cls, f.__name__, classmethod(f))
return f |
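A small self-contained demo of the pattern; the Model class and build_default function are invented for illustration:

class Model:
    @classmethod
    def class_method(cls, f):
        # dynamically bind f to the class as a classmethod
        setattr(cls, f.__name__, classmethod(f))
        return f

@Model.class_method
def build_default(cls):
    return cls()

print(isinstance(Model.build_default(), Model))   # True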
def encrypt(self, plaintext, encoder=encoding.RawEncoder):
"""
Encrypts the plaintext message using a random-generated ephemeral
keypair and returns a "composed ciphertext", containing both
the public part of the keypair and the ciphertext proper,
encoded with the encoder.
The private part of the ephemeral key-pair will be scrubbed before
returning the ciphertext, therefore, the sender will not be able to
decrypt the generated ciphertext.
:param plaintext: [:class:`bytes`] The plaintext message to encrypt
:param encoder: The encoder to use to encode the ciphertext
:return bytes: encoded ciphertext
"""
ciphertext = nacl.bindings.crypto_box_seal(
plaintext,
self._public_key
)
encoded_ciphertext = encoder.encode(ciphertext)
return encoded_ciphertext | Encrypts the plaintext message using a random-generated ephemeral
keypair and returns a "composed ciphertext", containing both
the public part of the keypair and the ciphertext proper,
encoded with the encoder.
The private part of the ephemeral key-pair will be scrubbed before
returning the ciphertext, therefore, the sender will not be able to
decrypt the generated ciphertext.
:param plaintext: [:class:`bytes`] The plaintext message to encrypt
:param encoder: The encoder to use to encode the ciphertext
:return bytes: encoded ciphertext | Below is the the instruction that describes the task:
### Input:
Encrypts the plaintext message using a random-generated ephemeral
keypair and returns a "composed ciphertext", containing both
the public part of the keypair and the ciphertext proper,
encoded with the encoder.
The private part of the ephemeral key-pair will be scrubbed before
returning the ciphertext, therefore, the sender will not be able to
decrypt the generated ciphertext.
:param plaintext: [:class:`bytes`] The plaintext message to encrypt
:param encoder: The encoder to use to encode the ciphertext
:return bytes: encoded ciphertext
### Response:
def encrypt(self, plaintext, encoder=encoding.RawEncoder):
"""
Encrypts the plaintext message using a random-generated ephemeral
keypair and returns a "composed ciphertext", containing both
the public part of the keypair and the ciphertext proper,
encoded with the encoder.
The private part of the ephemeral key-pair will be scrubbed before
returning the ciphertext, therefore, the sender will not be able to
decrypt the generated ciphertext.
:param plaintext: [:class:`bytes`] The plaintext message to encrypt
:param encoder: The encoder to use to encode the ciphertext
:return bytes: encoded ciphertext
"""
ciphertext = nacl.bindings.crypto_box_seal(
plaintext,
self._public_key
)
encoded_ciphertext = encoder.encode(ciphertext)
return encoded_ciphertext |
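The method mirrors PyNaCl's sealed-box construction; a hedged round-trip sketch using nacl.public directly (requires the PyNaCl package):

from nacl.public import PrivateKey, SealedBox

receiver_key = PrivateKey.generate()
ciphertext = SealedBox(receiver_key.public_key).encrypt(b"attack at dawn")
plaintext = SealedBox(receiver_key).decrypt(ciphertext)
print(plaintext)    # b'attack at dawn'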
def _uncheck_descendant(self, item):
"""Uncheck the boxes of item's descendant."""
children = self.get_children(item)
for iid in children:
self.change_state(iid, "unchecked")
self._uncheck_descendant(iid) | Uncheck the boxes of item's descendant. | Below is the the instruction that describes the task:
### Input:
Uncheck the boxes of item's descendant.
### Response:
def _uncheck_descendant(self, item):
"""Uncheck the boxes of item's descendant."""
children = self.get_children(item)
for iid in children:
self.change_state(iid, "unchecked")
self._uncheck_descendant(iid) |
def get_next_section_start_line(self, data):
"""Get the starting line number of next section.
It will return -1 if no section was found.
The section is a section key (e.g. 'Parameters:')
then the content
:param data: a list of strings containing the docstring's lines
:returns: the index of next section else -1
"""
start = -1
for i, line in enumerate(data):
if isin_alone([k + ":" for k in self.opt.values()], line):
start = i
break
return start | Get the starting line number of next section.
It will return -1 if no section was found.
The section is a section key (e.g. 'Parameters:')
then the content
:param data: a list of strings containing the docstring's lines
:returns: the index of next section else -1 | Below is the the instruction that describes the task:
### Input:
Get the starting line number of next section.
It will return -1 if no section was found.
The section is a section key (e.g. 'Parameters:')
then the content
:param data: a list of strings containing the docstring's lines
:returns: the index of next section else -1
### Response:
def get_next_section_start_line(self, data):
"""Get the starting line number of next section.
It will return -1 if no section was found.
The section is a section key (e.g. 'Parameters:')
then the content
:param data: a list of strings containing the docstring's lines
:returns: the index of next section else -1
"""
start = -1
for i, line in enumerate(data):
if isin_alone([k + ":" for k in self.opt.values()], line):
start = i
break
return start |
def clean(input, suffix, stat="pmode1", maxiter=15, sigrej=2.0,
lower=None, upper=None, binwidth=0.3,
mask1=None, mask2=None, dqbits=None,
rpt_clean=0, atol=0.01, clobber=False, verbose=True):
r"""Remove horizontal stripes from ACS WFC post-SM4 data.
Parameters
----------
input : str or list of str
Input filenames in one of these formats:
* a Python list of filenames
* a partial filename with wildcards ('\*flt.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
suffix : str
The string to use to add to each input file name to
indicate an output product. This string will be appended
to the suffix in each input filename to create the
new output filename. For example, setting `suffix='csck'`
will create '\*_csck.fits' images.
stat : { 'pmode1', 'pmode2', 'mean', 'mode', 'median', 'midpt' } (Default = 'pmode1')
Specifies the statistics to be used for computation of the
background in image rows:
* 'pmode1' - SEXTRACTOR-like mode estimate based on a
modified `Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``2.5*median-1.5*mean``;
* 'pmode2' - mode estimate based on
`Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``3*median-2*mean``;
* 'mean' - the mean of the distribution of the "good" pixels (after
clipping, masking, etc.);
* 'mode' - the mode of the distribution of the "good" pixels;
* 'median' - the median of the distribution of the "good" pixels;
* 'midpt' - estimate of the median of the distribution of the "good"
pixels based on an algorithm similar to IRAF's ``imagestats`` task
(``CDF(midpt)=1/2``).
.. note::
The midpoint and mode are computed in two passes through the
image. In the first pass the standard deviation of the pixels
is calculated and used with the *binwidth* parameter to compute
the resolution of the data histogram. The midpoint is estimated
by integrating the histogram and computing by interpolation
the data value at which exactly half the pixels are below that
data value and half are above it. The mode is computed by
locating the maximum of the data histogram and fitting the peak
by parabolic interpolation.
maxiter : int
This parameter controls the maximum number of iterations
to perform when computing the statistics used to compute the
row-by-row corrections.
sigrej : float
This parameter sets the sigma level for the rejection applied
during each iteration of statistics computations for the
row-by-row corrections.
lower : float, None (Default = None)
Lower limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
upper : float, None (Default = None)
Upper limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
binwidth : float (Default = 0.3)
Histogram's bin width, in sigma units, used to sample the
distribution of pixel brightness values in order to compute the
background statistics. This parameter is applicable *only* to *stat*
parameter values of `'mode'` or `'midpt'`.
clobber : bool
Specify whether or not to 'clobber' (delete then replace)
previously generated products with the same names.
mask1 : str, numpy.ndarray, None, or list of these types
Mask images for ``SCI,1``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
mask2 : str, numpy.ndarray, None, or list of these types
Mask images for ``SCI,2``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
This is not used for subarrays.
dqbits : int, str, None (Default = None)
Integer sum of all the DQ bit values from the input image's DQ array
that should be considered "good" when building masks for de-striping
computations. For example, if pixels in the DQ array can be
combinations of 1, 2, 4, and 8 flags and one wants to consider
DQ "defects" having flags 2 and 4 as being acceptable for de-striping
computations, then `dqbits` should be set to 2+4=6. Then a DQ pixel
having values 2,4, or 6 will be considered a good pixel, while a DQ
pixel with a value, e.g., 1+2=3, 4+8=12, etc. will be flagged
as a "bad" pixel.
Alternatively, one can enter a comma- or '+'-separated list of
integer bit flags that should be added to obtain the final
"good" bits. For example, both ``4,8`` and ``4+8`` are equivalent to
setting `dqbits` to 12.
| Set `dqbits` to 0 to make *all* non-zero pixels in the DQ
mask to be considered "bad" pixels, and the corresponding image
pixels not to be used for de-striping computations.
| Default value (`None`) will turn off the use of image's DQ array
for de-striping computations.
| In order to reverse the meaning of the `dqbits`
parameter from indicating values of the "good" DQ flags
to indicating the "bad" DQ flags, prepend '~' to the string
value. For example, in order not to use pixels with
DQ flags 4 and 8 for sky computations and to consider
as "good" all other pixels (regardless of their DQ flag),
set `dqbits` to ``~4+8``, or ``~4,8``. To obtain the
same effect with an `int` input value (except for 0),
enter -(4+8+1)=-9. Following this convention,
a `dqbits` string value of ``'~0'`` would be equivalent to
setting ``dqbits=None``.
.. note::
DQ masks (if used), *will be* combined with user masks specified
in the `mask1` and `mask2` parameters (if any).
rpt_clean : int
An integer indicating how many *additional* times stripe cleaning
should be performed on the input image. Default = 0.
atol : float, None
The threshold for maximum absolute value of bias stripe correction
below which repeated cleanings can stop. When `atol` is `None`
cleaning will be repeated `rpt_clean` number of times.
Default = 0.01 [e].
verbose : bool
Print informational messages. Default = True.
"""
from stsci.tools import parseinput # Optional package dependency
flist = parseinput.parseinput(input)[0]
if isinstance(mask1, str):
mlist1 = parseinput.parseinput(mask1)[0]
elif isinstance(mask1, np.ndarray):
mlist1 = [mask1.copy()]
elif mask1 is None:
mlist1 = []
elif isinstance(mask1, list):
mlist1 = []
for m in mask1:
if isinstance(m, np.ndarray):
mlist1.append(m.copy())
elif isinstance(m, str):
mlist1 += parseinput.parseinput(m)[0]
else:
raise TypeError("'mask1' must be a list of str or "
"numpy.ndarray values.")
else:
raise TypeError("'mask1' must be either a str, or a "
"numpy.ndarray, or a list of the two type of "
"values.")
if isinstance(mask2, str):
mlist2 = parseinput.parseinput(mask2)[0]
elif isinstance(mask2, np.ndarray):
mlist2 = [mask2.copy()]
elif mask2 is None:
mlist2 = []
elif isinstance(mask2, list):
mlist2 = []
for m in mask2:
if isinstance(m, np.ndarray):
mlist2.append(m.copy())
elif isinstance(m, str):
mlist2 += parseinput.parseinput(m)[0]
else:
raise TypeError("'mask2' must be a list of str or "
"numpy.ndarray values.")
else:
raise TypeError("'mask2' must be either a str or a "
"numpy.ndarray, or a list of the two type of "
"values.")
n_input = len(flist)
n_mask1 = len(mlist1)
n_mask2 = len(mlist2)
if n_input == 0:
raise ValueError("No input file(s) provided or "
"the file(s) do not exist")
if n_mask1 == 0:
mlist1 = [None] * n_input
elif n_mask1 != n_input:
raise ValueError('Insufficient masks for [SCI,1]')
if n_mask2 == 0:
mlist2 = [None] * n_input
elif n_mask2 != n_input:
raise ValueError('Insufficient masks for [SCI,2]')
for image, maskfile1, maskfile2 in zip(flist, mlist1, mlist2):
# Skip processing pre-SM4 images
if (fits.getval(image, 'EXPSTART') <= MJD_SM4):
LOG.warning('{0} is pre-SM4. Skipping...'.format(image))
continue
# Data must be in ELECTRONS
if (fits.getval(image, 'BUNIT', ext=1) != 'ELECTRONS'):
LOG.warning('{0} is not in ELECTRONS. Skipping...'.format(image))
continue
# Skip processing CTECORR-ed images
if (fits.getval(image, 'PCTECORR') == 'COMPLETE'):
LOG.warning('{0} already has PCTECORR applied. '
'Skipping...'.format(image))
continue
# generate output filename for each input based on specification
# of the output suffix
output = image.replace('.fits', '_' + suffix + '.fits')
LOG.info('Processing ' + image)
# verify masks defined (or not) simultaneously:
if (fits.getval(image, 'CCDAMP') == 'ABCD' and
((mask1 is not None and mask2 is None) or
(mask1 is None and mask2 is not None))):
raise ValueError("Both 'mask1' and 'mask2' must be specified "
"or not specified together.")
maskdata = _read_mask(maskfile1, maskfile2)
perform_correction(image, output, stat=stat, maxiter=maxiter,
sigrej=sigrej, lower=lower, upper=upper,
binwidth=binwidth, mask=maskdata, dqbits=dqbits,
rpt_clean=rpt_clean, atol=atol,
clobber=clobber, verbose=verbose)
LOG.info(output + ' created') | r"""Remove horizontal stripes from ACS WFC post-SM4 data.
Parameters
----------
input : str or list of str
Input filenames in one of these formats:
* a Python list of filenames
* a partial filename with wildcards ('\*flt.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
suffix : str
The string to use to add to each input file name to
indicate an output product. This string will be appended
to the suffix in each input filename to create the
new output filename. For example, setting `suffix='csck'`
will create '\*_csck.fits' images.
stat : { 'pmode1', 'pmode2', 'mean', 'mode', 'median', 'midpt' } (Default = 'pmode1')
Specifies the statistics to be used for computation of the
background in image rows:
* 'pmode1' - SEXTRACTOR-like mode estimate based on a
modified `Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``2.5*median-1.5*mean``;
* 'pmode2' - mode estimate based on
`Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``3*median-2*mean``;
* 'mean' - the mean of the distribution of the "good" pixels (after
clipping, masking, etc.);
* 'mode' - the mode of the distribution of the "good" pixels;
* 'median' - the median of the distribution of the "good" pixels;
* 'midpt' - estimate of the median of the distribution of the "good"
pixels based on an algorithm similar to IRAF's ``imagestats`` task
(``CDF(midpt)=1/2``).
.. note::
The midpoint and mode are computed in two passes through the
image. In the first pass the standard deviation of the pixels
is calculated and used with the *binwidth* parameter to compute
the resolution of the data histogram. The midpoint is estimated
by integrating the histogram and computing by interpolation
the data value at which exactly half the pixels are below that
data value and half are above it. The mode is computed by
locating the maximum of the data histogram and fitting the peak
by parabolic interpolation.
maxiter : int
This parameter controls the maximum number of iterations
to perform when computing the statistics used to compute the
row-by-row corrections.
sigrej : float
This parameter sets the sigma level for the rejection applied
during each iteration of statistics computations for the
row-by-row corrections.
lower : float, None (Default = None)
Lower limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
upper : float, None (Default = None)
Upper limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
binwidth : float (Default = 0.3)
Histogram's bin width, in sigma units, used to sample the
distribution of pixel brightness values in order to compute the
background statistics. This parameter is applicable *only* to *stat*
parameter values of `'mode'` or `'midpt'`.
clobber : bool
Specify whether or not to 'clobber' (delete then replace)
previously generated products with the same names.
mask1 : str, numpy.ndarray, None, or list of these types
Mask images for ``SCI,1``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
mask2 : str, numpy.ndarray, None, or list of these types
Mask images for ``SCI,2``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
This is not used for subarrays.
dqbits : int, str, None (Default = None)
Integer sum of all the DQ bit values from the input image's DQ array
that should be considered "good" when building masks for de-striping
computations. For example, if pixels in the DQ array can be
combinations of 1, 2, 4, and 8 flags and one wants to consider
DQ "defects" having flags 2 and 4 as being acceptable for de-striping
computations, then `dqbits` should be set to 2+4=6. Then a DQ pixel
having values 2,4, or 6 will be considered a good pixel, while a DQ
pixel with a value, e.g., 1+2=3, 4+8=12, etc. will be flagged
as a "bad" pixel.
Alternatively, one can enter a comma- or '+'-separated list of
integer bit flags that should be added to obtain the final
"good" bits. For example, both ``4,8`` and ``4+8`` are equivalent to
setting `dqbits` to 12.
| Set `dqbits` to 0 to make *all* non-zero pixels in the DQ
mask to be considered "bad" pixels, and the corresponding image
pixels not to be used for de-striping computations.
| Default value (`None`) will turn off the use of image's DQ array
for de-striping computations.
| In order to reverse the meaning of the `dqbits`
parameter from indicating values of the "good" DQ flags
to indicating the "bad" DQ flags, prepend '~' to the string
value. For example, in order not to use pixels with
DQ flags 4 and 8 for sky computations and to consider
as "good" all other pixels (regardless of their DQ flag),
set `dqbits` to ``~4+8``, or ``~4,8``. To obtain the
same effect with an `int` input value (except for 0),
enter -(4+8+1)=-9. Following this convention,
a `dqbits` string value of ``'~0'`` would be equivalent to
setting ``dqbits=None``.
.. note::
DQ masks (if used), *will be* combined with user masks specified
in the `mask1` and `mask2` parameters (if any).
rpt_clean : int
An integer indicating how many *additional* times stripe cleaning
should be performed on the input image. Default = 0.
atol : float, None
The threshold for maximum absolute value of bias stripe correction
below which repeated cleanings can stop. When `atol` is `None`
cleaning will be repeated `rpt_clean` number of times.
Default = 0.01 [e].
verbose : bool
Print informational messages. Default = True. | Below is the the instruction that describes the task:
### Input:
r"""Remove horizontal stripes from ACS WFC post-SM4 data.
Parameters
----------
input : str or list of str
Input filenames in one of these formats:
* a Python list of filenames
* a partial filename with wildcards ('\*flt.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
suffix : str
The string to use to add to each input file name to
indicate an output product. This string will be appended
to the suffix in each input filename to create the
new output filename. For example, setting `suffix='csck'`
will create '\*_csck.fits' images.
stat : { 'pmode1', 'pmode2', 'mean', 'mode', 'median', 'midpt' } (Default = 'pmode1')
Specifies the statistics to be used for computation of the
background in image rows:
* 'pmode1' - SEXTRACTOR-like mode estimate based on a
modified `Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``2.5*median-1.5*mean``;
* 'pmode2' - mode estimate based on
`Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``3*median-2*mean``;
* 'mean' - the mean of the distribution of the "good" pixels (after
clipping, masking, etc.);
* 'mode' - the mode of the distribution of the "good" pixels;
* 'median' - the median of the distribution of the "good" pixels;
* 'midpt' - estimate of the median of the distribution of the "good"
pixels based on an algorithm similar to IRAF's ``imagestats`` task
(``CDF(midpt)=1/2``).
.. note::
The midpoint and mode are computed in two passes through the
image. In the first pass the standard deviation of the pixels
is calculated and used with the *binwidth* parameter to compute
the resolution of the data histogram. The midpoint is estimated
by integrating the histogram and computing by interpolation
the data value at which exactly half the pixels are below that
data value and half are above it. The mode is computed by
locating the maximum of the data histogram and fitting the peak
by parabolic interpolation.
maxiter : int
This parameter controls the maximum number of iterations
to perform when computing the statistics used to compute the
row-by-row corrections.
sigrej : float
This parameter sets the sigma level for the rejection applied
during each iteration of statistics computations for the
row-by-row corrections.
lower : float, None (Default = None)
Lower limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
upper : float, None (Default = None)
Upper limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
binwidth : float (Default = 0.3)
Histogram's bin width, in sigma units, used to sample the
distribution of pixel brightness values in order to compute the
background statistics. This parameter is applicable *only* to *stat*
parameter values of `'mode'` or `'midpt'`.
clobber : bool
Specify whether or not to 'clobber' (delete then replace)
previously generated products with the same names.
mask1 : str, numpy.ndarray, None, or list of these types
Mask images for ``SCI,1``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
mask2 : str, numpy.ndarray, None, or list of these types
Mask images for ``SCI,2``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
This is not used for subarrays.
dqbits : int, str, None (Default = None)
Integer sum of all the DQ bit values from the input image's DQ array
that should be considered "good" when building masks for de-striping
computations. For example, if pixels in the DQ array can be
combinations of 1, 2, 4, and 8 flags and one wants to consider
DQ "defects" having flags 2 and 4 as being acceptable for de-striping
computations, then `dqbits` should be set to 2+4=6. Then a DQ pixel
having values 2,4, or 6 will be considered a good pixel, while a DQ
pixel with a value, e.g., 1+2=3, 4+8=12, etc. will be flagged
as a "bad" pixel.
Alternatively, one can enter a comma- or '+'-separated list of
integer bit flags that should be added to obtain the final
"good" bits. For example, both ``4,8`` and ``4+8`` are equivalent to
setting `dqbits` to 12.
| Set `dqbits` to 0 to make *all* non-zero pixels in the DQ
mask to be considered "bad" pixels, and the corresponding image
pixels not to be used for de-striping computations.
| Default value (`None`) will turn off the use of image's DQ array
for de-striping computations.
| In order to reverse the meaning of the `dqbits`
parameter from indicating values of the "good" DQ flags
to indicating the "bad" DQ flags, prepend '~' to the string
value. For example, in order not to use pixels with
DQ flags 4 and 8 for sky computations and to consider
as "good" all other pixels (regardless of their DQ flag),
set `dqbits` to ``~4+8``, or ``~4,8``. To obtain the
same effect with an `int` input value (except for 0),
enter -(4+8+1)=-9. Following this convention,
a `dqbits` string value of ``'~0'`` would be equivalent to
setting ``dqbits=None``.
.. note::
DQ masks (if used), *will be* combined with user masks specified
in the `mask1` and `mask2` parameters (if any).
rpt_clean : int
An integer indicating how many *additional* times stripe cleaning
should be performed on the input image. Default = 0.
atol : float, None
The threshold for maximum absolute value of bias stripe correction
below which repeated cleanings can stop. When `atol` is `None`
cleaning will be repeated `rpt_clean` number of times.
Default = 0.01 [e].
verbose : bool
Print informational messages. Default = True.
### Response:
def clean(input, suffix, stat="pmode1", maxiter=15, sigrej=2.0,
lower=None, upper=None, binwidth=0.3,
mask1=None, mask2=None, dqbits=None,
rpt_clean=0, atol=0.01, clobber=False, verbose=True):
r"""Remove horizontal stripes from ACS WFC post-SM4 data.
Parameters
----------
input : str or list of str
Input filenames in one of these formats:
* a Python list of filenames
* a partial filename with wildcards ('\*flt.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
suffix : str
The string to use to add to each input file name to
indicate an output product. This string will be appended
to the suffix in each input filename to create the
new output filename. For example, setting `suffix='csck'`
will create '\*_csck.fits' images.
stat : { 'pmode1', 'pmode2', 'mean', 'mode', 'median', 'midpt' } (Default = 'pmode1')
Specifies the statistics to be used for computation of the
background in image rows:
* 'pmode1' - SEXTRACTOR-like mode estimate based on a
modified `Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``2.5*median-1.5*mean``;
* 'pmode2' - mode estimate based on
`Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``3*median-2*mean``;
* 'mean' - the mean of the distribution of the "good" pixels (after
clipping, masking, etc.);
* 'mode' - the mode of the distribution of the "good" pixels;
* 'median' - the median of the distribution of the "good" pixels;
* 'midpt' - estimate of the median of the distribution of the "good"
pixels based on an algorithm similar to IRAF's ``imagestats`` task
(``CDF(midpt)=1/2``).
.. note::
The midpoint and mode are computed in two passes through the
image. In the first pass the standard deviation of the pixels
is calculated and used with the *binwidth* parameter to compute
the resolution of the data histogram. The midpoint is estimated
by integrating the histogram and computing by interpolation
the data value at which exactly half the pixels are below that
data value and half are above it. The mode is computed by
locating the maximum of the data histogram and fitting the peak
by parabolic interpolation.
maxiter : int
This parameter controls the maximum number of iterations
to perform when computing the statistics used to compute the
row-by-row corrections.
sigrej : float
This parameter sets the sigma level for the rejection applied
during each iteration of statistics computations for the
row-by-row corrections.
lower : float, None (Default = None)
Lower limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
upper : float, None (Default = None)
Upper limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
binwidth : float (Default = 0.3)
Histogram's bin width, in sigma units, used to sample the
distribution of pixel brightness values in order to compute the
background statistics. This parameter is applicable *only* to *stat*
parameter values of `'mode'` or `'midpt'`.
clobber : bool
Specify whether or not to 'clobber' (delete then replace)
previously generated products with the same names.
mask1 : str, numpy.ndarray, None, or list of these types
Mask images for ``SCI,1``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
mask2 : str, numpy.ndarray, None, or list of these types
Mask images for ``SCI,2``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
This is not used for subarrays.
dqbits : int, str, None (Default = None)
Integer sum of all the DQ bit values from the input image's DQ array
that should be considered "good" when building masks for de-striping
computations. For example, if pixels in the DQ array can be
combinations of 1, 2, 4, and 8 flags and one wants to consider
DQ "defects" having flags 2 and 4 as being acceptable for de-striping
computations, then `dqbits` should be set to 2+4=6. Then a DQ pixel
having values 2,4, or 6 will be considered a good pixel, while a DQ
pixel with a value, e.g., 1+2=3, 4+8=12, etc. will be flagged
as a "bad" pixel.
Alternatively, one can enter a comma- or '+'-separated list of
integer bit flags that should be added to obtain the final
"good" bits. For example, both ``4,8`` and ``4+8`` are equivalent to
setting `dqbits` to 12.
| Set `dqbits` to 0 to make *all* non-zero pixels in the DQ
mask to be considered "bad" pixels, and the corresponding image
pixels not to be used for de-striping computations.
| Default value (`None`) will turn off the use of image's DQ array
for de-striping computations.
| In order to reverse the meaning of the `dqbits`
parameter from indicating values of the "good" DQ flags
to indicating the "bad" DQ flags, prepend '~' to the string
value. For example, in order not to use pixels with
DQ flags 4 and 8 for sky computations and to consider
as "good" all other pixels (regardless of their DQ flag),
set `dqbits` to ``~4+8``, or ``~4,8``. To obtain the
same effect with an `int` input value (except for 0),
enter -(4+8+1)=-9. Following this convention,
a `dqbits` string value of ``'~0'`` would be equivalent to
setting ``dqbits=None``.
.. note::
DQ masks (if used), *will be* combined with user masks specified
in the `mask1` and `mask2` parameters (if any).
rpt_clean : int
An integer indicating how many *additional* times stripe cleaning
should be performed on the input image. Default = 0.
atol : float, None
The threshold for maximum absolute value of bias stripe correction
below which repeated cleanings can stop. When `atol` is `None`
cleaning will be repeated `rpt_clean` number of times.
Default = 0.01 [e].
verbose : bool
Print informational messages. Default = True.
"""
from stsci.tools import parseinput # Optional package dependency
flist = parseinput.parseinput(input)[0]
if isinstance(mask1, str):
mlist1 = parseinput.parseinput(mask1)[0]
elif isinstance(mask1, np.ndarray):
mlist1 = [mask1.copy()]
elif mask1 is None:
mlist1 = []
elif isinstance(mask1, list):
mlist1 = []
for m in mask1:
if isinstance(m, np.ndarray):
mlist1.append(m.copy())
elif isinstance(m, str):
mlist1 += parseinput.parseinput(m)[0]
else:
raise TypeError("'mask1' must be a list of str or "
"numpy.ndarray values.")
else:
raise TypeError("'mask1' must be either a str, or a "
"numpy.ndarray, or a list of the two type of "
"values.")
if isinstance(mask2, str):
mlist2 = parseinput.parseinput(mask2)[0]
elif isinstance(mask2, np.ndarray):
mlist2 = [mask2.copy()]
elif mask2 is None:
mlist2 = []
elif isinstance(mask2, list):
mlist2 = []
for m in mask2:
if isinstance(m, np.ndarray):
mlist2.append(m.copy())
elif isinstance(m, str):
mlist2 += parseinput.parseinput(m)[0]
else:
raise TypeError("'mask2' must be a list of str or "
"numpy.ndarray values.")
else:
raise TypeError("'mask2' must be either a str or a "
"numpy.ndarray, or a list of the two type of "
"values.")
n_input = len(flist)
n_mask1 = len(mlist1)
n_mask2 = len(mlist2)
if n_input == 0:
raise ValueError("No input file(s) provided or "
"the file(s) do not exist")
if n_mask1 == 0:
mlist1 = [None] * n_input
elif n_mask1 != n_input:
raise ValueError('Insufficient masks for [SCI,1]')
if n_mask2 == 0:
mlist2 = [None] * n_input
elif n_mask2 != n_input:
raise ValueError('Insufficient masks for [SCI,2]')
for image, maskfile1, maskfile2 in zip(flist, mlist1, mlist2):
# Skip processing pre-SM4 images
if (fits.getval(image, 'EXPSTART') <= MJD_SM4):
LOG.warning('{0} is pre-SM4. Skipping...'.format(image))
continue
# Data must be in ELECTRONS
if (fits.getval(image, 'BUNIT', ext=1) != 'ELECTRONS'):
LOG.warning('{0} is not in ELECTRONS. Skipping...'.format(image))
continue
# Skip processing CTECORR-ed images
if (fits.getval(image, 'PCTECORR') == 'COMPLETE'):
LOG.warning('{0} already has PCTECORR applied. '
'Skipping...'.format(image))
continue
# generate output filename for each input based on specification
# of the output suffix
output = image.replace('.fits', '_' + suffix + '.fits')
LOG.info('Processing ' + image)
# verify masks defined (or not) simultaneously:
if (fits.getval(image, 'CCDAMP') == 'ABCD' and
((mask1 is not None and mask2 is None) or
(mask1 is None and mask2 is not None))):
raise ValueError("Both 'mask1' and 'mask2' must be specified "
"or not specified together.")
maskdata = _read_mask(maskfile1, maskfile2)
perform_correction(image, output, stat=stat, maxiter=maxiter,
sigrej=sigrej, lower=lower, upper=upper,
binwidth=binwidth, mask=maskdata, dqbits=dqbits,
rpt_clean=rpt_clean, atol=atol,
clobber=clobber, verbose=verbose)
LOG.info(output + ' created') |
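A minimal usage sketch for the clean() routine above; the filename pattern, output suffix, and DQ flag values are illustrative assumptions, not recommended settings:
# De-stripe every matching FLT exposure, writing *_strp.fits files next to the inputs.
# The dqbits choice below (treat DQ flags 64 and 512 as bad) is hypothetical.
clean('j*_flt.fits', 'strp',
      stat='pmode1', maxiter=15, sigrej=2.0,
      dqbits='~64,512', clobber=True, verbose=True)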
def connect_entry_signals():
"""
Connect all the signals on Entry model.
"""
post_save.connect(
ping_directories_handler, sender=Entry,
dispatch_uid=ENTRY_PS_PING_DIRECTORIES)
post_save.connect(
ping_external_urls_handler, sender=Entry,
dispatch_uid=ENTRY_PS_PING_EXTERNAL_URLS)
post_save.connect(
flush_similar_cache_handler, sender=Entry,
dispatch_uid=ENTRY_PS_FLUSH_SIMILAR_CACHE)
post_delete.connect(
flush_similar_cache_handler, sender=Entry,
dispatch_uid=ENTRY_PD_FLUSH_SIMILAR_CACHE) | Connect all the signals on Entry model. | Below is the the instruction that describes the task:
### Input:
Connect all the signals on Entry model.
### Response:
def connect_entry_signals():
"""
Connect all the signals on Entry model.
"""
post_save.connect(
ping_directories_handler, sender=Entry,
dispatch_uid=ENTRY_PS_PING_DIRECTORIES)
post_save.connect(
ping_external_urls_handler, sender=Entry,
dispatch_uid=ENTRY_PS_PING_EXTERNAL_URLS)
post_save.connect(
flush_similar_cache_handler, sender=Entry,
dispatch_uid=ENTRY_PS_FLUSH_SIMILAR_CACHE)
post_delete.connect(
flush_similar_cache_handler, sender=Entry,
dispatch_uid=ENTRY_PD_FLUSH_SIMILAR_CACHE) |
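One plausible place to call connect_entry_signals() is a Django AppConfig.ready() hook, sketched below; the config class and app label are placeholders:
from django.apps import AppConfig

class BlogConfig(AppConfig):
    name = 'blog'  # hypothetical app label
    def ready(self):
        # register the Entry signal handlers once the app registry is ready
        connect_entry_signals()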
def version_binary(self):
'''
Return version number which is stored in binary format.
Returns:
str: <major 0-255>.<minor 0-255>.<build 0-65535> or None if not found
'''
# Under MSI 'Version' is a 'REG_DWORD' which then sets other registry
# values like DisplayVersion to x.x.x to the same value.
# However not everyone plays by the rules, so we need to check first.
# version_binary_data will be None if the reg value does not exist.
# Some installs set 'Version' to REG_SZ (string) which is not
# the MSI standard
try:
item_value, item_type = self.__reg_query_value(self.__reg_uninstall_handle, 'version')
except pywintypes.error as exc: # pylint: disable=no-member
if exc.winerror == winerror.ERROR_FILE_NOT_FOUND:
# Not Found
return '', ''
version_binary_text = ''
version_src = ''
if item_value:
if item_type == win32con.REG_DWORD:
if isinstance(item_value, six.integer_types):
version_binary_raw = item_value
if version_binary_raw:
# Major.Minor.Build
version_binary_text = '{0}.{1}.{2}'.format(
version_binary_raw >> 24 & 0xff,
version_binary_raw >> 16 & 0xff,
version_binary_raw & 0xffff)
version_src = 'binary-version'
elif (item_type == win32con.REG_SZ and
isinstance(item_value, six.string_types) and
self.__version_pattern.match(item_value) is not None):
# Hey, version should be an int/REG_DWORD, an installer has set
# it to a string
version_binary_text = item_value.strip(' ')
version_src = 'binary-version (string)'
return (version_binary_text, version_src) | Return version number which is stored in binary format.
Returns:
str: <major 0-255>.<minor 0-255>.<build 0-65535> or None if not found
### Input:
Return version number which is stored in binary format.
Returns:
str: <major 0-255>.<minor 0-255>.<build 0-65535> or None if not found
### Response:
def version_binary(self):
'''
Return version number which is stored in binary format.
Returns:
str: <major 0-255>.<minor 0-255>.<build 0-65535> or None if not found
'''
# Under MSI 'Version' is a 'REG_DWORD' which then sets other registry
# values like DisplayVersion to x.x.x to the same value.
# However not everyone plays by the rules, so we need to check first.
# version_binary_data will be None if the reg value does not exist.
# Some installs set 'Version' to REG_SZ (string) which is not
# the MSI standard
try:
item_value, item_type = self.__reg_query_value(self.__reg_uninstall_handle, 'version')
except pywintypes.error as exc: # pylint: disable=no-member
if exc.winerror == winerror.ERROR_FILE_NOT_FOUND:
# Not Found
return '', ''
version_binary_text = ''
version_src = ''
if item_value:
if item_type == win32con.REG_DWORD:
if isinstance(item_value, six.integer_types):
version_binary_raw = item_value
if version_binary_raw:
# Major.Minor.Build
version_binary_text = '{0}.{1}.{2}'.format(
version_binary_raw >> 24 & 0xff,
version_binary_raw >> 16 & 0xff,
version_binary_raw & 0xffff)
version_src = 'binary-version'
elif (item_type == win32con.REG_SZ and
isinstance(item_value, six.string_types) and
self.__version_pattern.match(item_value) is not None):
# Hey, version should be an int/REG_DWORD, an installer has set
# it to a string
version_binary_text = item_value.strip(' ')
version_src = 'binary-version (string)'
return (version_binary_text, version_src) |
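A worked example of the bit unpacking used above; the raw REG_DWORD value is made up for illustration:
raw = 0x06021388              # hypothetical 'Version' value read from the registry
major = raw >> 24 & 0xff      # 6
minor = raw >> 16 & 0xff      # 2
build = raw & 0xffff          # 0x1388 == 5000
print('{0}.{1}.{2}'.format(major, minor, build))  # -> 6.2.5000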
def preprocess_input(features, target, train_config, preprocess_output_dir,
model_type):
"""Perform some transformations after reading in the input tensors.
Args:
features: dict of feature_name to tensor
target: tensor
train_config: our training config object
preprocess_output_dir: folder should contain the vocab files.
model_type: the tf model type.
Raises:
ValueError: if wrong transforms are used
Returns:
New features dict and new target tensor.
"""
target_name = train_config['target_column']
key_name = train_config['key_column']
# Do the numerical transforms.
# Numerical transforms supported for regression/classification
# 1) num -> do nothing (identity, default)
# 2) num -> scale to -1, 1 (scale)
# 3) num -> scale to -a, a (scale with value parameter)
with tf.name_scope('numerical_feature_preprocess'):
if train_config['numerical_columns']:
numerical_analysis_file = os.path.join(preprocess_output_dir,
NUMERICAL_ANALYSIS)
if not file_io.file_exists(numerical_analysis_file):
raise ValueError('File %s not found in %s' %
(NUMERICAL_ANALYSIS, preprocess_output_dir))
numerical_anlysis = json.loads(
python_portable_string(
file_io.read_file_to_string(numerical_analysis_file)))
for name in train_config['numerical_columns']:
if name == target_name or name == key_name:
continue
transform_config = train_config['transforms'].get(name, {})
transform_name = transform_config.get('transform', None)
if transform_name == 'scale':
value = float(transform_config.get('value', 1.0))
features[name] = _scale_tensor(
features[name],
range_min=numerical_anlysis[name]['min'],
range_max=numerical_anlysis[name]['max'],
scale_min=-value,
scale_max=value)
elif transform_name == 'identity' or transform_name is None:
pass
else:
raise ValueError(('For numerical variables, only scale '
'and identity are supported: '
'Error for %s') % name)
# Do target transform if it exists.
if target is not None:
with tf.name_scope('target_feature_preprocess'):
if target_name in train_config['categorical_columns']:
labels = train_config['vocab_stats'][target_name]['labels']
table = tf.contrib.lookup.string_to_index_table_from_tensor(labels)
target = table.lookup(target)
# target = tf.contrib.lookup.string_to_index(target, labels)
# Do categorical transforms. Only apply vocab mapping. The real
# transforms are done with tf learn column features.
with tf.name_scope('categorical_feature_preprocess'):
for name in train_config['categorical_columns']:
if name == key_name or name == target_name:
continue
transform_config = train_config['transforms'].get(name, {})
transform_name = transform_config.get('transform', None)
if is_dnn_model(model_type):
if transform_name == 'embedding' or transform_name == 'one_hot' or transform_name is None:
map_vocab = True
else:
raise ValueError('Unknown transform %s' % transform_name)
elif is_linear_model(model_type):
if (transform_name == 'one_hot' or transform_name is None):
map_vocab = True
elif transform_name == 'embedding':
map_vocab = False
else:
raise ValueError('Unknown transform %s' % transform_name)
if map_vocab:
labels = train_config['vocab_stats'][name]['labels']
table = tf.contrib.lookup.string_to_index_table_from_tensor(labels)
features[name] = table.lookup(features[name])
return features, target | Perform some transformations after reading in the input tensors.
Args:
features: dict of feature_name to tensor
target: tensor
train_config: our training config object
preprocess_output_dir: folder should contain the vocab files.
model_type: the tf model type.
Raises:
ValueError: if wrong transforms are used
Returns:
New features dict and new target tensor. | Below is the the instruction that describes the task:
### Input:
Perform some transformations after reading in the input tensors.
Args:
features: dict of feature_name to tensor
target: tensor
train_config: our training config object
preprocess_output_dir: folder should contain the vocab files.
model_type: the tf model type.
Raises:
ValueError: if wrong transforms are used
Returns:
New features dict and new target tensor.
### Response:
def preprocess_input(features, target, train_config, preprocess_output_dir,
model_type):
"""Perform some transformations after reading in the input tensors.
Args:
features: dict of feature_name to tensor
target: tensor
train_config: our training config object
preprocess_output_dir: folder should contain the vocab files.
model_type: the tf model type.
Raises:
ValueError: if wrong transforms are used
Returns:
New features dict and new target tensor.
"""
target_name = train_config['target_column']
key_name = train_config['key_column']
# Do the numerical transforms.
# Numerical transforms supported for regression/classification
# 1) num -> do nothing (identity, default)
# 2) num -> scale to -1, 1 (scale)
# 3) num -> scale to -a, a (scale with value parameter)
with tf.name_scope('numerical_feature_preprocess'):
if train_config['numerical_columns']:
numerical_analysis_file = os.path.join(preprocess_output_dir,
NUMERICAL_ANALYSIS)
if not file_io.file_exists(numerical_analysis_file):
raise ValueError('File %s not found in %s' %
(NUMERICAL_ANALYSIS, preprocess_output_dir))
numerical_anlysis = json.loads(
python_portable_string(
file_io.read_file_to_string(numerical_analysis_file)))
for name in train_config['numerical_columns']:
if name == target_name or name == key_name:
continue
transform_config = train_config['transforms'].get(name, {})
transform_name = transform_config.get('transform', None)
if transform_name == 'scale':
value = float(transform_config.get('value', 1.0))
features[name] = _scale_tensor(
features[name],
range_min=numerical_anlysis[name]['min'],
range_max=numerical_anlysis[name]['max'],
scale_min=-value,
scale_max=value)
elif transform_name == 'identity' or transform_name is None:
pass
else:
raise ValueError(('For numerical variables, only scale '
'and identity are supported: '
'Error for %s') % name)
# Do target transform if it exists.
if target is not None:
with tf.name_scope('target_feature_preprocess'):
if target_name in train_config['categorical_columns']:
labels = train_config['vocab_stats'][target_name]['labels']
table = tf.contrib.lookup.string_to_index_table_from_tensor(labels)
target = table.lookup(target)
# target = tf.contrib.lookup.string_to_index(target, labels)
# Do categorical transforms. Only apply vocab mapping. The real
# transforms are done with tf learn column features.
with tf.name_scope('categorical_feature_preprocess'):
for name in train_config['categorical_columns']:
if name == key_name or name == target_name:
continue
transform_config = train_config['transforms'].get(name, {})
transform_name = transform_config.get('transform', None)
if is_dnn_model(model_type):
if transform_name == 'embedding' or transform_name == 'one_hot' or transform_name is None:
map_vocab = True
else:
raise ValueError('Unknown transform %s' % transform_name)
elif is_linear_model(model_type):
if (transform_name == 'one_hot' or transform_name is None):
map_vocab = True
elif transform_name == 'embedding':
map_vocab = False
else:
raise ValueError('Unknown transform %s' % transform_name)
if map_vocab:
labels = train_config['vocab_stats'][name]['labels']
table = tf.contrib.lookup.string_to_index_table_from_tensor(labels)
features[name] = table.lookup(features[name])
return features, target |
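For reference, a hypothetical fragment of the train_config['transforms'] mapping consumed by the numerical branch above; only the 'transform' and 'value' keys are read there, and the column names are placeholders:
train_config['transforms'] = {
    'income': {'transform': 'scale', 'value': 2.0},  # rescaled into [-2, 2]
    'age': {'transform': 'identity'},                # passed through unchanged
}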
def apply_dependencies(self):
"""Creates dependencies links between elements.
:return: None
"""
self.hosts.apply_dependencies()
self.services.apply_dependencies(self.hosts) | Creates dependencies links between elements.
:return: None | Below is the the instruction that describes the task:
### Input:
Creates dependencies links between elements.
:return: None
### Response:
def apply_dependencies(self):
"""Creates dependencies links between elements.
:return: None
"""
self.hosts.apply_dependencies()
self.services.apply_dependencies(self.hosts) |
def entries(self, start = None, end = None):
'''Retrieves entries from all people/tasks logged to this project.
Can be filtered based on time by specifying start/end datetimes.'''
if not start:
start = self.earliest_record
if not end:
end = self.latest_record
fr = start.strftime('%Y%m%d')
to = end.strftime('%Y%m%d')
url = str.format(
'projects/{}/entries?from={}&to={}',
self.id,
fr,
to,
)
response = self.hv.get_request(url)
return [Entry(self.hv, ej['day_entry']) for ej in response] | Retrieves entries from all people/tasks logged to this project.
Can be filtered based on time by specifying start/end datetimes. | Below is the the instruction that describes the task:
### Input:
Retrieves entries from all people/tasks logged to this project.
Can be filtered based on time by specifying start/end datetimes.
### Response:
def entries(self, start = None, end = None):
'''Retrieves entries from all people/tasks logged to this project.
Can be filtered based on time by specifying start/end datetimes.'''
if not start:
start = self.earliest_record
if not end:
end = self.latest_record
fr = start.strftime('%Y%m%d')
to = end.strftime('%Y%m%d')
url = str.format(
'projects/{}/entries?from={}&to={}',
self.id,
fr,
to,
)
response = self.hv.get_request(url)
return [Entry(self.hv, ej['day_entry']) for ej in response] |
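A usage sketch, assuming project is an existing Project wrapper instance; the date range is arbitrary:
import datetime

january = project.entries(
    start=datetime.datetime(2023, 1, 1),
    end=datetime.datetime(2023, 1, 31),
)
print(len(january), 'entries logged in January')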
def load(self, service_name, api_version=None, cached=True):
"""
Loads the desired JSON for a service. (uncached)
This will fall back through all the ``data_dirs`` provided to the
constructor, returning the **first** one it finds.
:param service_name: The name of the desired service
:type service_name: string
:param api_version: (Optional) The desired API version to load
:type api_version: string
:param cached: (Optional) Whether or not the cache should be used
when attempting to load the data. Default is ``True``.
:type cached: boolean
:returns: The loaded JSON as a dict
"""
# Fetch from the cache first if it's there.
if cached:
if service_name in self._loaded_data:
if api_version in self._loaded_data[service_name]:
return self._loaded_data[service_name][api_version]
data = {}
options = self.get_available_options(service_name)
match, version = self.get_best_match(
options,
service_name,
api_version=api_version
)
with open(match, 'r') as json_file:
data = json.load(json_file)
# Embed where we found it from for debugging purposes.
data['__file__'] = match
data['api_version'] = version
if cached:
self._loaded_data.setdefault(service_name, {})
self._loaded_data[service_name][api_version] = data
return data | Loads the desired JSON for a service. (uncached)
This will fall back through all the ``data_dirs`` provided to the
constructor, returning the **first** one it finds.
:param service_name: The name of the desired service
:type service_name: string
:param api_version: (Optional) The desired API version to load
:type api_version: string
:param cached: (Optional) Whether or not the cache should be used
when attempting to load the data. Default is ``True``.
:type cached: boolean
:returns: The loaded JSON as a dict | Below is the the instruction that describes the task:
### Input:
Loads the desired JSON for a service. (uncached)
This will fall back through all the ``data_dirs`` provided to the
constructor, returning the **first** one it finds.
:param service_name: The name of the desired service
:type service_name: string
:param api_version: (Optional) The desired API version to load
:type api_version: string
:param cached: (Optional) Whether or not the cache should be used
when attempting to load the data. Default is ``True``.
:type cached: boolean
:returns: The loaded JSON as a dict
### Response:
def load(self, service_name, api_version=None, cached=True):
"""
Loads the desired JSON for a service. (uncached)
This will fall back through all the ``data_dirs`` provided to the
constructor, returning the **first** one it finds.
:param service_name: The name of the desired service
:type service_name: string
:param api_version: (Optional) The desired API version to load
:type api_version: string
:param cached: (Optional) Whether or not the cache should be used
when attempting to load the data. Default is ``True``.
:type cached: boolean
:returns: The loaded JSON as a dict
"""
# Fetch from the cache first if it's there.
if cached:
if service_name in self._loaded_data:
if api_version in self._loaded_data[service_name]:
return self._loaded_data[service_name][api_version]
data = {}
options = self.get_available_options(service_name)
match, version = self.get_best_match(
options,
service_name,
api_version=api_version
)
with open(match, 'r') as json_file:
data = json.load(json_file)
# Embed where we found it from for debugging purposes.
data['__file__'] = match
data['api_version'] = version
if cached:
self._loaded_data.setdefault(service_name, {})
self._loaded_data[service_name][api_version] = data
return data |
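A usage sketch, assuming loader is an instance of the containing class and a JSON description for the named service exists in one of its data_dirs; the service name and API version are placeholders:
data = loader.load('s3', api_version='2006-03-01')
print(data['__file__'])     # path the JSON was actually loaded from (embedded above)
print(data['api_version'])  # API version that was matched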
def get(self):
"""Get specific information about this hub."""
output = helm("get", self.release)
if output.returncode != 0:
print("Something went wrong!")
print(output.stderr)
else:
print(output.stdout) | Get specific information about this hub. | Below is the the instruction that describes the task:
### Input:
Get specific information about this hub.
### Response:
def get(self):
"""Get specific information about this hub."""
output = helm("get", self.release)
if output.returncode != 0:
print("Something went wrong!")
print(output.stderr)
else:
print(output.stdout) |
def get_file(self, file_hash, save_file_at):
""" Get the scan results for a file.
Even if you do not have a Private Mass API key that you can use, you can still download files from the
VirusTotal storage making use of your VirusTotal Intelligence quota, i.e. programmatic downloads will
also deduct quota.
:param file_hash: You may use either the md5, sha1 or sha256 hash of the file in order to download it.
:param save_file_at: Path of where to save the file.
"""
params = {'hash': file_hash, 'apikey': self.api_key}
try:
response = requests.get(self.base + 'download/', params=params, proxies=self.proxies, stream=True)
except requests.RequestException as e:
return dict(error=e.message)
if response.status_code == requests.codes.ok:
self.save_downloaded_file(file_hash, save_file_at, response.content)
return response.content
elif response.status_code == 403:
return dict(error='You tried to perform calls to functions for which you require a Private API key.',
response_code=response.status_code)
elif response.status_code == 404:
return dict(error='File not found.', response_code=response.status_code)
else:
return dict(response_code=response.status_code) | Get the scan results for a file.
Even if you do not have a Private Mass API key that you can use, you can still download files from the
VirusTotal storage making use of your VirusTotal Intelligence quota, i.e. programmatic downloads will
also deduct quota.
:param file_hash: You may use either the md5, sha1 or sha256 hash of the file in order to download it.
:param save_file_at: Path of where to save the file. | Below is the the instruction that describes the task:
### Input:
Get the scan results for a file.
Even if you do not have a Private Mass API key that you can use, you can still download files from the
VirusTotal storage making use of your VirusTotal Intelligence quota, i.e. programmatic downloads will
also deduct quota.
:param file_hash: You may use either the md5, sha1 or sha256 hash of the file in order to download it.
:param save_file_at: Path of where to save the file.
### Response:
def get_file(self, file_hash, save_file_at):
""" Get the scan results for a file.
Even if you do not have a Private Mass API key that you can use, you can still download files from the
VirusTotal storage making use of your VirusTotal Intelligence quota, i.e. programmatic downloads will
also deduct quota.
:param file_hash: You may use either the md5, sha1 or sha256 hash of the file in order to download it.
:param save_file_at: Path of where to save the file.
"""
params = {'hash': file_hash, 'apikey': self.api_key}
try:
response = requests.get(self.base + 'download/', params=params, proxies=self.proxies, stream=True)
except requests.RequestException as e:
return dict(error=e.message)
if response.status_code == requests.codes.ok:
self.save_downloaded_file(file_hash, save_file_at, response.content)
return response.content
elif response.status_code == 403:
return dict(error='You tried to perform calls to functions for which you require a Private API key.',
response_code=response.status_code)
elif response.status_code == 404:
return dict(error='File not found.', response_code=response.status_code)
else:
return dict(response_code=response.status_code) |
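A usage sketch, assuming vt is an instance of the wrapper class; the hash and output path are placeholders:
result = vt.get_file('<md5-sha1-or-sha256>', '/tmp/sample.bin')
if isinstance(result, dict):
    # error or status dictionary returned instead of file content
    print('request did not return file content:', result)
else:
    print('downloaded', len(result), 'bytes')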
def process_token(self, kind, string, start, end, line):
""" Process a single token.
"""
if self.current_block.is_comment:
if kind == tokenize.COMMENT:
self.current_block.add(string, start, end, line)
else:
self.new_noncomment(start[0], end[0])
else:
if kind == tokenize.COMMENT:
self.new_comment(string, start, end, line)
else:
self.current_block.add(string, start, end, line) | Process a single token. | Below is the the instruction that describes the task:
### Input:
Process a single token.
### Response:
def process_token(self, kind, string, start, end, line):
""" Process a single token.
"""
if self.current_block.is_comment:
if kind == tokenize.COMMENT:
self.current_block.add(string, start, end, line)
else:
self.new_noncomment(start[0], end[0])
else:
if kind == tokenize.COMMENT:
self.new_comment(string, start, end, line)
else:
self.current_block.add(string, start, end, line) |
def _set_formatter(self):
"""
Inspects config and sets the name of the formatter to either "json" or "text"
as instance attr. If not present in config, default is "text"
"""
if hasattr(self._config, "formatter") and self._config.formatter == "json":
self._formatter = "json"
else:
self._formatter = "text" | Inspects config and sets the name of the formatter to either "json" or "text"
as instance attr. If not present in config, default is "text" | Below is the the instruction that describes the task:
### Input:
Inspects config and sets the name of the formatter to either "json" or "text"
as instance attr. If not present in config, default is "text"
### Response:
def _set_formatter(self):
"""
Inspects config and sets the name of the formatter to either "json" or "text"
as instance attr. If not present in config, default is "text"
"""
if hasattr(self._config, "formatter") and self._config.formatter == "json":
self._formatter = "json"
else:
self._formatter = "text" |
def replace_tax_rate_by_id(cls, tax_rate_id, tax_rate, **kwargs):
"""Replace TaxRate
Replace all attributes of TaxRate
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_tax_rate_by_id(tax_rate_id, tax_rate, async=True)
>>> result = thread.get()
:param async bool
:param str tax_rate_id: ID of taxRate to replace (required)
:param TaxRate tax_rate: Attributes of taxRate to replace (required)
:return: TaxRate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_tax_rate_by_id_with_http_info(tax_rate_id, tax_rate, **kwargs)
else:
(data) = cls._replace_tax_rate_by_id_with_http_info(tax_rate_id, tax_rate, **kwargs)
return data | Replace TaxRate
Replace all attributes of TaxRate
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_tax_rate_by_id(tax_rate_id, tax_rate, async=True)
>>> result = thread.get()
:param async bool
:param str tax_rate_id: ID of taxRate to replace (required)
:param TaxRate tax_rate: Attributes of taxRate to replace (required)
:return: TaxRate
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Replace TaxRate
Replace all attributes of TaxRate
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_tax_rate_by_id(tax_rate_id, tax_rate, async=True)
>>> result = thread.get()
:param async bool
:param str tax_rate_id: ID of taxRate to replace (required)
:param TaxRate tax_rate: Attributes of taxRate to replace (required)
:return: TaxRate
If the method is called asynchronously,
returns the request thread.
### Response:
def replace_tax_rate_by_id(cls, tax_rate_id, tax_rate, **kwargs):
"""Replace TaxRate
Replace all attributes of TaxRate
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_tax_rate_by_id(tax_rate_id, tax_rate, async=True)
>>> result = thread.get()
:param async bool
:param str tax_rate_id: ID of taxRate to replace (required)
:param TaxRate tax_rate: Attributes of taxRate to replace (required)
:return: TaxRate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_tax_rate_by_id_with_http_info(tax_rate_id, tax_rate, **kwargs)
else:
(data) = cls._replace_tax_rate_by_id_with_http_info(tax_rate_id, tax_rate, **kwargs)
return data |
def param_to_array(*param):
"""
Convert an arbitrary number of parameters to :class:ndarray class objects.
This is for converting parameter objects to numpy arrays, when using
scipy.weave.inline routine. In scipy.weave.blitz there is no automatic
array detection (even when the array inherits from :class:ndarray)
"""
import warnings
warnings.warn("Please use param.values, as this function will be deprecated in the next release.", DeprecationWarning)
assert len(param) > 0, "At least one parameter needed"
if len(param) == 1:
return param[0].view(np.ndarray)
return [x.view(np.ndarray) for x in param] | Convert an arbitrary number of parameters to :class:ndarray class objects.
This is for converting parameter objects to numpy arrays, when using
scipy.weave.inline routine. In scipy.weave.blitz there is no automatic
array detection (even when the array inherits from :class:ndarray) | Below is the the instruction that describes the task:
### Input:
Convert an arbitrary number of parameters to :class:ndarray class objects.
This is for converting parameter objects to numpy arrays, when using
scipy.weave.inline routine. In scipy.weave.blitz there is no automatic
array detection (even when the array inherits from :class:ndarray)
### Response:
def param_to_array(*param):
"""
Convert an arbitrary number of parameters to :class:ndarray class objects.
This is for converting parameter objects to numpy arrays, when using
scipy.weave.inline routine. In scipy.weave.blitz there is no automatic
array detection (even when the array inherits from :class:ndarray)
"""
import warnings
warnings.warn("Please use param.values, as this function will be deprecated in the next release.", DeprecationWarning)
assert len(param) > 0, "At least one parameter needed"
if len(param) == 1:
return param[0].view(np.ndarray)
return [x.view(np.ndarray) for x in param] |
def partition_all(s, sep):
"""
Uses str.partition() to split every occurrence of sep in s. The returned list does not contain empty strings.
If sep is a list, all separators are evaluated.
:param s: The string to split.
:param sep: A separator string or a list of separator strings.
:return: A list of parts split by sep
"""
if isinstance(sep, list):
parts = _partition_all_internal(s, sep[0])
sep = sep[1:]
for s in sep:
tmp = []
for p in parts:
tmp.extend(_partition_all_internal(p, s))
parts = tmp
return parts
else:
return _partition_all_internal(s, sep) | Uses str.partition() to split every occurrence of sep in s. The returned list does not contain empty strings.
If sep is a list, all separators are evaluated.
:param s: The string to split.
:param sep: A separator string or a list of separator strings.
:return: A list of parts split by sep | Below is the the instruction that describes the task:
### Input:
Uses str.partition() to split every occurrence of sep in s. The returned list does not contain empty strings.
If sep is a list, all separators are evaluated.
:param s: The string to split.
:param sep: A separator string or a list of separator strings.
:return: A list of parts split by sep
### Response:
def partition_all(s, sep):
"""
Uses str.partition() to split every occurrence of sep in s. The returned list does not contain empty strings.
If sep is a list, all separators are evaluated.
:param s: The string to split.
:param sep: A separator string or a list of separator strings.
:return: A list of parts split by sep
"""
if isinstance(sep, list):
parts = _partition_all_internal(s, sep[0])
sep = sep[1:]
for s in sep:
tmp = []
for p in parts:
tmp.extend(_partition_all_internal(p, s))
parts = tmp
return parts
else:
return _partition_all_internal(s, sep) |
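A usage sketch; the exact elements of the returned list depend on _partition_all_internal(), which is defined elsewhere in this module, but the call below applies both separators in turn:
parts = partition_all('key=1;other=2', ['=', ';'])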
def fail(self, err='MockupDB query failure', *args, **kwargs):
"""Reply to a query with the QueryFailure flag and an '$err' key.
Returns True so it is suitable as an `~MockupDB.autoresponds` handler.
"""
kwargs.setdefault('flags', 0)
kwargs['flags'] |= REPLY_FLAGS['QueryFailure']
kwargs['$err'] = err
self.replies(*args, **kwargs)
return True | Reply to a query with the QueryFailure flag and an '$err' key.
Returns True so it is suitable as an `~MockupDB.autoresponds` handler. | Below is the the instruction that describes the task:
### Input:
Reply to a query with the QueryFailure flag and an '$err' key.
Returns True so it is suitable as an `~MockupDB.autoresponds` handler.
### Response:
def fail(self, err='MockupDB query failure', *args, **kwargs):
"""Reply to a query with the QueryFailure flag and an '$err' key.
Returns True so it is suitable as an `~MockupDB.autoresponds` handler.
"""
kwargs.setdefault('flags', 0)
kwargs['flags'] |= REPLY_FLAGS['QueryFailure']
kwargs['$err'] = err
self.replies(*args, **kwargs)
return True |
def get_level_nodes(self, level):
"""!
@brief Traverses CF-tree to obtain nodes at the specified level.
@param[in] level (uint): CF-tree level from that nodes should be returned.
@return (list) List of CF-nodes that are located on the specified level of the CF-tree.
"""
level_nodes = [];
if (level < self.__height):
level_nodes = self.__recursive_get_level_nodes(level, self.__root);
return level_nodes; | !
@brief Traverses CF-tree to obtain nodes at the specified level.
@param[in] level (uint): CF-tree level from that nodes should be returned.
@return (list) List of CF-nodes that are located on the specified level of the CF-tree. | Below is the the instruction that describes the task:
### Input:
!
@brief Traverses CF-tree to obtain nodes at the specified level.
@param[in] level (uint): CF-tree level from that nodes should be returned.
@return (list) List of CF-nodes that are located on the specified level of the CF-tree.
### Response:
def get_level_nodes(self, level):
"""!
@brief Traverses CF-tree to obtain nodes at the specified level.
@param[in] level (uint): CF-tree level from that nodes should be returned.
@return (list) List of CF-nodes that are located on the specified level of the CF-tree.
"""
level_nodes = [];
if (level < self.__height):
level_nodes = self.__recursive_get_level_nodes(level, self.__root);
return level_nodes; |
def dropSpans(spans, text):
"""
Drop from text the blocks identified in :param spans:, possibly nested.
"""
spans.sort()
res = ''
offset = 0
for s, e in spans:
if offset <= s: # handle nesting
if offset < s:
res += text[offset:s]
offset = e
res += text[offset:]
return res | Drop from text the blocks identified in :param spans:, possibly nested. | Below is the the instruction that describes the task:
### Input:
Drop from text the blocks identified in :param spans:, possibly nested.
### Response:
def dropSpans(spans, text):
"""
Drop from text the blocks identified in :param spans:, possibly nested.
"""
spans.sort()
res = ''
offset = 0
for s, e in spans:
if offset <= s: # handle nesting
if offset < s:
res += text[offset:s]
offset = e
res += text[offset:]
return res |
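A worked example with made-up spans: characters inside each (start, end) range are dropped, and a span nested inside an earlier one is skipped because offset has already moved past it:
text = '0123456789'
print(dropSpans([(2, 4), (6, 8)], text))  # -> '014589'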
def linear_rescale(image, in_range=(0, 1), out_range=(1, 255)):
"""
Linear rescaling.
Attributes
----------
image : numpy ndarray
Image array to rescale.
in_range : list, int, optional, (default: [0,1])
Image min/max value to rescale.
out_range : list, int, optional, (default: [1,255])
output min/max bounds to rescale to.
Returns
-------
out : numpy ndarray
returns rescaled image array.
"""
imin, imax = in_range
omin, omax = out_range
image = np.clip(image, imin, imax) - imin
image = image / np.float(imax - imin)
return image * (omax - omin) + omin | Linear rescaling.
Attributes
----------
image : numpy ndarray
Image array to rescale.
in_range : list, int, optional, (default: [0,1])
Image min/max value to rescale.
out_range : list, int, optional, (default: [1,255])
output min/max bounds to rescale to.
Returns
-------
out : numpy ndarray
returns rescaled image array. | Below is the the instruction that describes the task:
### Input:
Linear rescaling.
Attributes
----------
image : numpy ndarray
Image array to rescale.
in_range : list, int, optional, (default: [0,1])
Image min/max value to rescale.
out_range : list, int, optional, (default: [1,255])
output min/max bounds to rescale to.
Returns
-------
out : numpy ndarray
returns rescaled image array.
### Response:
def linear_rescale(image, in_range=(0, 1), out_range=(1, 255)):
"""
Linear rescaling.
Attributes
----------
image : numpy ndarray
Image array to rescale.
in_range : list, int, optional, (default: [0,1])
Image min/max value to rescale.
out_range : list, int, optional, (default: [1,255])
output min/max bounds to rescale to.
Returns
-------
out : numpy ndarray
returns rescaled image array.
"""
imin, imax = in_range
omin, omax = out_range
image = np.clip(image, imin, imax) - imin
image = image / np.float(imax - imin)
return image * (omax - omin) + omin |
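A small numerical check of the mapping above; inputs at the in_range bounds land exactly on the out_range bounds:
import numpy as np

arr = np.array([0.0, 0.25, 0.5, 1.0])
print(linear_rescale(arr, in_range=(0, 1), out_range=(1, 255)))
# values come out as 1.0, 64.5, 128.0 and 255.0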
def _run_setup_py(self, args, echo=True, echo2=True, ff=''):
"""Run setup.py with monkey-patched setuptools.
The patch forces setuptools to use the file-finder 'ff'.
If 'ff' is the empty string, the patch is not applied.
'args' is the list of arguments that should be passed to
setup.py.
"""
python = self.python
if ff:
setup_py = '-c"%s"' % (RUN_SETUP % locals())
else:
setup_py = 'setup.py %s' % ' '.join(args)
rc, lines = self.process.popen(
'"%(python)s" %(setup_py)s' % locals(), echo=echo, echo2=echo2)
return rc, lines | Run setup.py with monkey-patched setuptools.
The patch forces setuptools to use the file-finder 'ff'.
If 'ff' is the empty string, the patch is not applied.
'args' is the list of arguments that should be passed to
setup.py. | Below is the the instruction that describes the task:
### Input:
Run setup.py with monkey-patched setuptools.
The patch forces setuptools to use the file-finder 'ff'.
If 'ff' is the empty string, the patch is not applied.
'args' is the list of arguments that should be passed to
setup.py.
### Response:
def _run_setup_py(self, args, echo=True, echo2=True, ff=''):
"""Run setup.py with monkey-patched setuptools.
The patch forces setuptools to use the file-finder 'ff'.
If 'ff' is the empty string, the patch is not applied.
'args' is the list of arguments that should be passed to
setup.py.
"""
python = self.python
if ff:
setup_py = '-c"%s"' % (RUN_SETUP % locals())
else:
setup_py = 'setup.py %s' % ' '.join(args)
rc, lines = self.process.popen(
'"%(python)s" %(setup_py)s' % locals(), echo=echo, echo2=echo2)
return rc, lines |
def find_gui_and_backend(gui=None):
"""Given a gui string return the gui and mpl backend.
Parameters
----------
gui : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
Returns
-------
A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
'WXAgg','Qt4Agg','module://IPython.zmq.pylab.backend_inline').
"""
import matplotlib
if gui and gui != 'auto':
# select backend based on requested gui
backend = backends[gui]
else:
backend = matplotlib.rcParams['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
return gui, backend | Given a gui string return the gui and mpl backend.
Parameters
----------
gui : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
Returns
-------
A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
'WXAgg','Qt4Agg','module://IPython.zmq.pylab.backend_inline'). | Below is the the instruction that describes the task:
### Input:
Given a gui string return the gui and mpl backend.
Parameters
----------
gui : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
Returns
-------
A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
'WXAgg','Qt4Agg','module://IPython.zmq.pylab.backend_inline').
### Response:
def find_gui_and_backend(gui=None):
"""Given a gui string return the gui and mpl backend.
Parameters
----------
gui : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
Returns
-------
A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
'WXAgg','Qt4Agg','module://IPython.zmq.pylab.backend_inline').
"""
import matplotlib
if gui and gui != 'auto':
# select backend based on requested gui
backend = backends[gui]
else:
backend = matplotlib.rcParams['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
return gui, backend |
def config_stdio(self, log_configurations: Optional[List[LogConfiguration]] = None, default_level=logging.INFO) -> None:
"""
Configure the stdio `StreamHandler` levels on the specified loggers.
If no log configurations are specified then the `default_level` will be applied to all handlers.
Args:
log_configurations: a list of (component name, log level) tuples
default_level: logging level to apply when no log_configurations are specified
"""
# no configuration specified, apply `default_level` to the stdio handler of all known loggers
if not log_configurations:
for logger in self.loggers.values():
self._restrict_output(logger, default_level)
# only apply specified configuration to the stdio `StreamHandler` of the specific component
else:
for component, level in log_configurations:
try:
logger = self.loggers[self.root + component]
except KeyError:
raise ValueError("Failed to configure component. Invalid name: {}".format(component))
self._restrict_output(logger, level) | Configure the stdio `StreamHandler` levels on the specified loggers.
If no log configurations are specified then the `default_level` will be applied to all handlers.
Args:
log_configurations: a list of (component name, log level) tuples
default_level: logging level to apply when no log_configurations are specified | Below is the the instruction that describes the task:
### Input:
Configure the stdio `StreamHandler` levels on the specified loggers.
If no log configurations are specified then the `default_level` will be applied to all handlers.
Args:
log_configurations: a list of (component name, log level) tuples
default_level: logging level to apply when no log_configurations are specified
### Response:
def config_stdio(self, log_configurations: Optional[List[LogConfiguration]] = None, default_level=logging.INFO) -> None:
"""
Configure the stdio `StreamHandler` levels on the specified loggers.
If no log configurations are specified then the `default_level` will be applied to all handlers.
Args:
log_configurations: a list of (component name, log level) tuples
default_level: logging level to apply when no log_configurations are specified
"""
# no configuration specified, apply `default_level` to the stdio handler of all known loggers
if not log_configurations:
for logger in self.loggers.values():
self._restrict_output(logger, default_level)
# only apply specified configuration to the stdio `StreamHandler` of the specific component
else:
for component, level in log_configurations:
try:
logger = self.loggers[self.root + component]
except KeyError:
raise ValueError("Failed to configure component. Invalid name: {}".format(component))
self._restrict_output(logger, level) |
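Two usage sketches, assuming manager is an instance of the containing class; the component names are placeholders:
import logging

# apply one level to the stdio handler of every known logger
manager.config_stdio(default_level=logging.WARNING)

# or adjust only specific components, leaving the rest untouched
manager.config_stdio([('network', logging.DEBUG), ('storage', logging.ERROR)])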
def fingerprint(txt):
"""
takes a string and truncates to standard form for data matching.
Based on the spec at OpenRefine
https://github.com/OpenRefine/OpenRefine/wiki/Clustering-In-Depth#fingerprint
- remove leading and trailing whitespace
- change all characters to their lowercase representation
- remove all punctuation and control characters
- split the string into whitespace-separated tokens
- sort the tokens and remove duplicates
- join the tokens back together
- normalize extended western characters to their ASCII representation (for example "gödel" → "godel")
"""
raw_text = txt.upper() #.strip(' ').replace('\n','')
tokens = sorted(list(set(raw_text.split(' '))))
#print('tokens = ', tokens)
res = ''.join([strip_nonalpha(t) for t in tokens])
return res | takes a string and truncates to standard form for data matching.
Based on the spec at OpenRefine
https://github.com/OpenRefine/OpenRefine/wiki/Clustering-In-Depth#fingerprint
- remove leading and trailing whitespace
- change all characters to their lowercase representation
- remove all punctuation and control characters
- split the string into whitespace-separated tokens
- sort the tokens and remove duplicates
- join the tokens back together
- normalize extended western characters to their ASCII representation (for example "gödel" → "godel") | Below is the the instruction that describes the task:
### Input:
takes a string and truncates to standard form for data matching.
Based on the spec at OpenRefine
https://github.com/OpenRefine/OpenRefine/wiki/Clustering-In-Depth#fingerprint
- remove leading and trailing whitespace
- change all characters to their lowercase representation
- remove all punctuation and control characters
- split the string into whitespace-separated tokens
- sort the tokens and remove duplicates
- join the tokens back together
- normalize extended western characters to their ASCII representation (for example "gödel" → "godel")
### Response:
def fingerprint(txt):
"""
takes a string and truncates to standard form for data matching.
Based on the spec at OpenRefine
https://github.com/OpenRefine/OpenRefine/wiki/Clustering-In-Depth#fingerprint
- remove leading and trailing whitespace
- change all characters to their lowercase representation
- remove all punctuation and control characters
- split the string into whitespace-separated tokens
- sort the tokens and remove duplicates
- join the tokens back together
- normalize extended western characters to their ASCII representation (for example "gödel" → "godel")
"""
raw_text = txt.upper() #.strip(' ').replace('\n','')
tokens = sorted(list(set(raw_text.split(' '))))
#print('tokens = ', tokens)
res = ''.join([strip_nonalpha(t) for t in tokens])
return res |
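An illustrative call; the exact result depends on strip_nonalpha(), which is defined elsewhere in the module, but assuming it drops punctuation the tokens 'GODEL,' and 'KURT' collapse to the value noted below:
print(fingerprint('Godel, Kurt'))  # likely 'GODELKURT' under that assumption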
def takeAt( self, index ):
"""
Removes the widget from the rollout at the given index.
:param index | <int>
:return <QWidget> || None
"""
layout = self.widget().layout()
item = layout.takeAt(index)
if ( not item ):
return None
return item.widget().widget() | Removes the widget from the rollout at the given index.
:param index | <int>
:return <QWidget> || None | Below is the the instruction that describes the task:
### Input:
Removes the widget from the rollout at the given index.
:param index | <int>
:return <QWidget> || None
### Response:
def takeAt( self, index ):
"""
Removes the widget from the rollout at the given index.
:param index | <int>
:return <QWidget> || None
"""
layout = self.widget().layout()
item = layout.takeAt(index)
if ( not item ):
return None
return item.widget().widget() |
def declone_3rad(data, sample):
"""
3rad uses random adapters to identify pcr duplicates. We will
remove pcr dupes here. Basically append the random adapter to
each sequence, do a regular old vsearch derep, then trim
off the adapter, and push it down the pipeline. This will
remove all identical seqs with identical random i5 adapters.
"""
LOGGER.info("Entering declone_3rad - {}".format(sample.name))
## Append i5 adapter to the head of each read. Merged file is input, and
## still has fq qual score so also have to append several qscores for the
## adapter bases. Open the merge file, get quarts, go through each read
## and append the necessary stuff.
adapter_seqs_file = tempfile.NamedTemporaryFile(mode='wb',
delete=False,
dir=data.dirs.edits,
suffix="_append_adapters_.fastq")
try:
with open(sample.files.edits[0][0]) as infile:
quarts = itertools.izip(*[iter(infile)]*4)
## a list to store until writing
writing = []
counts = 0
while 1:
try:
read = quarts.next()
except StopIteration:
break
## Split on +, get [1], split on "_" (can be either _r1 or
## _m1 if merged reads) and get [0] for the i5
## prepend "EEEEEEEE" as qscore for the adapters
i5 = read[0].split("+")[1].split("_")[0]
## If any non ACGT in the i5 then drop this sequence
if 'N' in i5:
continue
writing.append("\n".join([
read[0].strip(),
i5 + read[1].strip(),
read[2].strip(),
"E"*8 + read[3].strip()]
))
## Write the data in chunks
counts += 1
if not counts % 1000:
adapter_seqs_file.write("\n".join(writing)+"\n")
writing = []
if writing:
adapter_seqs_file.write("\n".join(writing))
adapter_seqs_file.close()
tmp_outfile = tempfile.NamedTemporaryFile(mode='wb',
delete=False,
dir=data.dirs.edits,
suffix="_decloned_w_adapters_.fastq")
## Close the tmp file bcz vsearch will write to it by name, then
## we will want to reopen it to read from it.
tmp_outfile.close()
## Derep the data (adapters+seq)
derep_and_sort(data, adapter_seqs_file.name,
os.path.join(data.dirs.edits, tmp_outfile.name), 2)
## Remove adapters from head of sequence and write out
## tmp_outfile is now the input file for the next step
## first vsearch derep discards the qscore so we iterate
## by pairs
with open(tmp_outfile.name) as infile:
with open(os.path.join(data.dirs.edits, sample.name+"_declone.fastq"),\
'wb') as outfile:
duo = itertools.izip(*[iter(infile)]*2)
## a list to store until writing
writing = []
counts2 = 0
while 1:
try:
read = duo.next()
except StopIteration:
break
## Peel off the adapters. There's probably a faster
## way of doing this.
writing.append("\n".join([
read[0].strip(),
read[1].strip()[8:]]
))
## Write the data in chunks
counts2 += 1
if not counts2 % 1000:
outfile.write("\n".join(writing)+"\n")
writing = []
if writing:
outfile.write("\n".join(writing))
outfile.close()
LOGGER.info("Removed pcr duplicates from {} - {}".format(sample.name, counts-counts2))
except Exception as inst:
raise IPyradError(" Caught error while decloning "\
+ "3rad data - {}".format(inst))
finally:
## failed samples will cause tmp file removal to raise.
## just ignore it.
try:
## Clean up temp files
if os.path.exists(adapter_seqs_file.name):
os.remove(adapter_seqs_file.name)
if os.path.exists(tmp_outfile.name):
os.remove(tmp_outfile.name)
except Exception as inst:
pass | 3rad uses random adapters to identify pcr duplicates. We will
remove pcr dupes here. Basically append the random adapter to
each sequence, do a regular old vsearch derep, then trim
off the adapter, and push it down the pipeline. This will
remove all identical seqs with identical random i5 adapters. | Below is the the instruction that describes the task:
### Input:
3rad uses random adapters to identify pcr duplicates. We will
remove pcr dupes here. Basically append the random adapter to
each sequence, do a regular old vsearch derep, then trim
off the adapter, and push it down the pipeline. This will
remove all identical seqs with identical random i5 adapters.
### Response:
def declone_3rad(data, sample):
"""
3rad uses random adapters to identify pcr duplicates. We will
    remove pcr dupes here. Basically append the random adapter to
each sequence, do a regular old vsearch derep, then trim
off the adapter, and push it down the pipeline. This will
remove all identical seqs with identical random i5 adapters.
"""
LOGGER.info("Entering declone_3rad - {}".format(sample.name))
## Append i5 adapter to the head of each read. Merged file is input, and
## still has fq qual score so also have to append several qscores for the
## adapter bases. Open the merge file, get quarts, go through each read
## and append the necessary stuff.
adapter_seqs_file = tempfile.NamedTemporaryFile(mode='wb',
delete=False,
dir=data.dirs.edits,
suffix="_append_adapters_.fastq")
try:
with open(sample.files.edits[0][0]) as infile:
quarts = itertools.izip(*[iter(infile)]*4)
## a list to store until writing
writing = []
counts = 0
while 1:
try:
read = quarts.next()
except StopIteration:
break
## Split on +, get [1], split on "_" (can be either _r1 or
## _m1 if merged reads) and get [0] for the i5
## prepend "EEEEEEEE" as qscore for the adapters
i5 = read[0].split("+")[1].split("_")[0]
## If any non ACGT in the i5 then drop this sequence
if 'N' in i5:
continue
writing.append("\n".join([
read[0].strip(),
i5 + read[1].strip(),
read[2].strip(),
"E"*8 + read[3].strip()]
))
## Write the data in chunks
counts += 1
if not counts % 1000:
adapter_seqs_file.write("\n".join(writing)+"\n")
writing = []
if writing:
adapter_seqs_file.write("\n".join(writing))
adapter_seqs_file.close()
tmp_outfile = tempfile.NamedTemporaryFile(mode='wb',
delete=False,
dir=data.dirs.edits,
suffix="_decloned_w_adapters_.fastq")
## Close the tmp file bcz vsearch will write to it by name, then
## we will want to reopen it to read from it.
tmp_outfile.close()
## Derep the data (adapters+seq)
derep_and_sort(data, adapter_seqs_file.name,
os.path.join(data.dirs.edits, tmp_outfile.name), 2)
## Remove adapters from head of sequence and write out
## tmp_outfile is now the input file for the next step
## first vsearch derep discards the qscore so we iterate
## by pairs
with open(tmp_outfile.name) as infile:
with open(os.path.join(data.dirs.edits, sample.name+"_declone.fastq"),\
'wb') as outfile:
duo = itertools.izip(*[iter(infile)]*2)
## a list to store until writing
writing = []
counts2 = 0
while 1:
try:
read = duo.next()
except StopIteration:
break
## Peel off the adapters. There's probably a faster
## way of doing this.
writing.append("\n".join([
read[0].strip(),
read[1].strip()[8:]]
))
## Write the data in chunks
counts2 += 1
if not counts2 % 1000:
outfile.write("\n".join(writing)+"\n")
writing = []
if writing:
outfile.write("\n".join(writing))
outfile.close()
LOGGER.info("Removed pcr duplicates from {} - {}".format(sample.name, counts-counts2))
except Exception as inst:
raise IPyradError(" Caught error while decloning "\
+ "3rad data - {}".format(inst))
finally:
## failed samples will cause tmp file removal to raise.
## just ignore it.
try:
## Clean up temp files
if os.path.exists(adapter_seqs_file.name):
os.remove(adapter_seqs_file.name)
if os.path.exists(tmp_outfile.name):
os.remove(tmp_outfile.name)
except Exception as inst:
pass |
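The decloning above relies on a particular FASTQ header layout: the i5 index is expected between the last "+" and an "_r1"/"_m1" suffix. The snippet below only illustrates that string surgery; the header shown is hypothetical, not real 3RAD output.

# Hypothetical header; the layout is assumed from the split("+")/split("_") logic above.
header = "@lane1_cluster42 1:N:0:ACGT+GGTTACAT_r1"
i5 = header.split("+")[1].split("_")[0]
print(i5)        # -> "GGTTACAT"
print("E" * 8)   # the placeholder quality characters appended for the 8 adapter bases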
def add_paragraph(self, text='', style=None):
"""
Return a paragraph newly added to the end of the content in this
cell. If present, *text* is added to the paragraph in a single run.
If specified, the paragraph style *style* is applied. If *style* is
not specified or is |None|, the result is as though the 'Normal'
style was applied. Note that the formatting of text in a cell can be
influenced by the table style. *text* can contain tab (``\\t``)
characters, which are converted to the appropriate XML form for
a tab. *text* can also include newline (``\\n``) or carriage return
(``\\r``) characters, each of which is converted to a line break.
"""
return super(_Cell, self).add_paragraph(text, style) | Return a paragraph newly added to the end of the content in this
cell. If present, *text* is added to the paragraph in a single run.
If specified, the paragraph style *style* is applied. If *style* is
not specified or is |None|, the result is as though the 'Normal'
style was applied. Note that the formatting of text in a cell can be
influenced by the table style. *text* can contain tab (``\\t``)
characters, which are converted to the appropriate XML form for
a tab. *text* can also include newline (``\\n``) or carriage return
(``\\r``) characters, each of which is converted to a line break. | Below is the the instruction that describes the task:
### Input:
Return a paragraph newly added to the end of the content in this
cell. If present, *text* is added to the paragraph in a single run.
If specified, the paragraph style *style* is applied. If *style* is
not specified or is |None|, the result is as though the 'Normal'
style was applied. Note that the formatting of text in a cell can be
influenced by the table style. *text* can contain tab (``\\t``)
characters, which are converted to the appropriate XML form for
a tab. *text* can also include newline (``\\n``) or carriage return
(``\\r``) characters, each of which is converted to a line break.
### Response:
def add_paragraph(self, text='', style=None):
"""
Return a paragraph newly added to the end of the content in this
cell. If present, *text* is added to the paragraph in a single run.
If specified, the paragraph style *style* is applied. If *style* is
not specified or is |None|, the result is as though the 'Normal'
style was applied. Note that the formatting of text in a cell can be
influenced by the table style. *text* can contain tab (``\\t``)
characters, which are converted to the appropriate XML form for
a tab. *text* can also include newline (``\\n``) or carriage return
(``\\r``) characters, each of which is converted to a line break.
"""
return super(_Cell, self).add_paragraph(text, style) |
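For orientation, a minimal python-docx usage sketch (assuming python-docx is installed; the file name and text are arbitrary). The tab and newline in the text exercise the conversions described in the docstring.

from docx import Document

doc = Document()
table = doc.add_table(rows=1, cols=1)
cell = table.cell(0, 0)
# "\t" becomes a tab element, "\n" becomes a line break inside the run
cell.add_paragraph("name:\tAda\nrole:\tengineer")
doc.save("cell_demo.docx")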
def base_geodetic_crs(self):
"""The :class:`GeodeticCRS` on which this projection is based."""
base = self.element.find(GML_NS + 'baseGeodeticCRS')
href = base.attrib[XLINK_NS + 'href']
return get(href) | The :class:`GeodeticCRS` on which this projection is based. | Below is the the instruction that describes the task:
### Input:
The :class:`GeodeticCRS` on which this projection is based.
### Response:
def base_geodetic_crs(self):
"""The :class:`GeodeticCRS` on which this projection is based."""
base = self.element.find(GML_NS + 'baseGeodeticCRS')
href = base.attrib[XLINK_NS + 'href']
return get(href) |
def match(self, text, noprefix=False):
"""Matches date/datetime string against date patterns and returns pattern and parsed date if matched.
        It's not intended for common usage, since if successful it returns the date as an array of numbers and the pattern
that matched this date
:param text:
Any human readable string
:type date_string: str|unicode
:param noprefix:
        If set True, prefix-based date pattern filtering is not used
:type noprefix: bool
:return: Returns dicts with `values` as array of representing parsed date and 'pattern' with info about matched pattern if successful, else returns None
:rtype: :class:`dict`."""
n = len(text)
if self.cachedpats is not None:
pats = self.cachedpats
else:
pats = self.patterns
if n > 5 and not noprefix:
basekeys = self.__matchPrefix(text[:6])
else:
basekeys = []
for p in pats:
if n < p['length']['min'] or n > p['length']['max']: continue
if p['right'] and len(basekeys) > 0 and p['basekey'] not in basekeys: continue
try:
r = p['pattern'].parseString(text)
# Do sanity check
d = r.asDict()
if 'month' in d:
val = int(d['month'])
if val > 12 or val < 1:
continue
if 'day' in d:
val = int(d['day'])
if val > 31 or val < 1:
continue
return {'values' : r, 'pattern' : p}
except ParseException as e:
# print p['key'], text.encode('utf-8'), e
pass
return None | Matches date/datetime string against date patterns and returns pattern and parsed date if matched.
It's not intended for common usage, since if successful it returns the date as an array of numbers and the pattern
that matched this date
:param text:
Any human readable string
:type date_string: str|unicode
:param noprefix:
If set True, prefix-based date pattern filtering is not used
:type noprefix: bool
:return: Returns dicts with `values` as array of representing parsed date and 'pattern' with info about matched pattern if successful, else returns None
:rtype: :class:`dict`. | Below is the the instruction that describes the task:
### Input:
Matches date/datetime string against date patterns and returns pattern and parsed date if matched.
It's not intended for common usage, since if successful it returns the date as an array of numbers and the pattern
that matched this date
:param text:
Any human readable string
:type date_string: str|unicode
:param noprefix:
If set True, prefix-based date pattern filtering is not used
:type noprefix: bool
:return: Returns dicts with `values` as array of representing parsed date and 'pattern' with info about matched pattern if successful, else returns None
:rtype: :class:`dict`.
### Response:
def match(self, text, noprefix=False):
"""Matches date/datetime string against date patterns and returns pattern and parsed date if matched.
        It's not intended for common usage, since if successful it returns the date as an array of numbers and the pattern
that matched this date
:param text:
Any human readable string
:type date_string: str|unicode
:param noprefix:
        If set True, prefix-based date pattern filtering is not used
:type noprefix: bool
:return: Returns dicts with `values` as array of representing parsed date and 'pattern' with info about matched pattern if successful, else returns None
:rtype: :class:`dict`."""
n = len(text)
if self.cachedpats is not None:
pats = self.cachedpats
else:
pats = self.patterns
if n > 5 and not noprefix:
basekeys = self.__matchPrefix(text[:6])
else:
basekeys = []
for p in pats:
if n < p['length']['min'] or n > p['length']['max']: continue
if p['right'] and len(basekeys) > 0 and p['basekey'] not in basekeys: continue
try:
r = p['pattern'].parseString(text)
# Do sanity check
d = r.asDict()
if 'month' in d:
val = int(d['month'])
if val > 12 or val < 1:
continue
if 'day' in d:
val = int(d['day'])
if val > 31 or val < 1:
continue
return {'values' : r, 'pattern' : p}
except ParseException as e:
# print p['key'], text.encode('utf-8'), e
pass
return None |
def _set_config(config):
"""Set gl configuration"""
pyglet_config = pyglet.gl.Config()
pyglet_config.red_size = config['red_size']
pyglet_config.green_size = config['green_size']
pyglet_config.blue_size = config['blue_size']
pyglet_config.alpha_size = config['alpha_size']
pyglet_config.accum_red_size = 0
pyglet_config.accum_green_size = 0
pyglet_config.accum_blue_size = 0
pyglet_config.accum_alpha_size = 0
pyglet_config.depth_size = config['depth_size']
pyglet_config.stencil_size = config['stencil_size']
pyglet_config.double_buffer = config['double_buffer']
pyglet_config.stereo = config['stereo']
pyglet_config.samples = config['samples']
return pyglet_config | Set gl configuration | Below is the the instruction that describes the task:
### Input:
Set gl configuration
### Response:
def _set_config(config):
"""Set gl configuration"""
pyglet_config = pyglet.gl.Config()
pyglet_config.red_size = config['red_size']
pyglet_config.green_size = config['green_size']
pyglet_config.blue_size = config['blue_size']
pyglet_config.alpha_size = config['alpha_size']
pyglet_config.accum_red_size = 0
pyglet_config.accum_green_size = 0
pyglet_config.accum_blue_size = 0
pyglet_config.accum_alpha_size = 0
pyglet_config.depth_size = config['depth_size']
pyglet_config.stencil_size = config['stencil_size']
pyglet_config.double_buffer = config['double_buffer']
pyglet_config.stereo = config['stereo']
pyglet_config.samples = config['samples']
return pyglet_config |
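The shape of the expected config mapping can be read straight off the attribute assignments above; the values below are purely illustrative.

# Illustrative values only; any mapping with these keys works.
config = {
    'red_size': 8, 'green_size': 8, 'blue_size': 8, 'alpha_size': 8,
    'depth_size': 24, 'stencil_size': 0,
    'double_buffer': True, 'stereo': False, 'samples': 4,
}
pyglet_config = _set_config(config)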
def get(self, request, *args, **kwargs):
"""
redirect user to captive page
with the social auth token in the querystring
(which will allow the captive page to send the token to freeradius)
"""
if not request.GET.get('cp'):
return HttpResponse(_('missing cp GET param'), status=400)
self.authorize(request, *args, **kwargs)
return HttpResponseRedirect(self.get_redirect_url(request)) | redirect user to captive page
with the social auth token in the querystring
(which will allow the captive page to send the token to freeradius) | Below is the the instruction that describes the task:
### Input:
redirect user to captive page
with the social auth token in the querystring
(which will allow the captive page to send the token to freeradius)
### Response:
def get(self, request, *args, **kwargs):
"""
redirect user to captive page
with the social auth token in the querystring
(which will allow the captive page to send the token to freeradius)
"""
if not request.GET.get('cp'):
return HttpResponse(_('missing cp GET param'), status=400)
self.authorize(request, *args, **kwargs)
return HttpResponseRedirect(self.get_redirect_url(request)) |
def _parse_hostname(self):
"""Parses the global config and returns the hostname value
Returns:
dict: The configured value for hostname. The returned dict
object is intended to be merged into the resource dict
"""
value = 'localhost'
match = re.search(r'^hostname ([^\s]+)$', self.config, re.M)
if match:
value = match.group(1)
return dict(hostname=value) | Parses the global config and returns the hostname value
Returns:
dict: The configured value for hostname. The returned dict
object is intended to be merged into the resource dict | Below is the the instruction that describes the task:
### Input:
Parses the global config and returns the hostname value
Returns:
dict: The configured value for hostname. The returned dict
object is intended to be merged into the resource dict
### Response:
def _parse_hostname(self):
"""Parses the global config and returns the hostname value
Returns:
dict: The configured value for hostname. The returned dict
object is intended to be merged into the resource dict
"""
value = 'localhost'
match = re.search(r'^hostname ([^\s]+)$', self.config, re.M)
if match:
value = match.group(1)
return dict(hostname=value) |
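The extraction is a single anchored, multiline regex; here is the same pattern run against a made-up running-config fragment (the device name is invented).

import re

running_config = "!\nhostname leaf-1a\nip domain-name example.net\n"
match = re.search(r'^hostname ([^\s]+)$', running_config, re.M)
hostname = match.group(1) if match else 'localhost'
print(hostname)   # -> "leaf-1a"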
def comment_set(self):
""" Get the comments that have been submitted for the chat
"""
ct = ContentType.objects.get_for_model(self.__class__)
qs = Comment.objects.filter(
content_type=ct,
object_pk=self.pk)
qs = qs.exclude(is_removed=True)
qs = qs.order_by('-submit_date')
return qs | Get the comments that have been submitted for the chat | Below is the the instruction that describes the task:
### Input:
Get the comments that have been submitted for the chat
### Response:
def comment_set(self):
""" Get the comments that have been submitted for the chat
"""
ct = ContentType.objects.get_for_model(self.__class__)
qs = Comment.objects.filter(
content_type=ct,
object_pk=self.pk)
qs = qs.exclude(is_removed=True)
qs = qs.order_by('-submit_date')
return qs |
def detail_poi(self, **kwargs):
"""Obtain detailed info of a given POI.
Args:
family (str): Family code of the POI (3 chars).
lang (str): Language code (*es* or *en*).
id (int): Optional, ID of the POI to query. Passing value -1 will
result in information from all POIs.
Returns:
Status boolean and parsed response (list[PoiDetails]), or
message string in case of error.
"""
# Endpoint parameters
params = {
'language': util.language_code(kwargs.get('lang')),
'family': kwargs.get('family')
}
if kwargs.get('id'):
params['id'] = kwargs['id']
# Request
result = self.make_request('detail_poi', {}, **params)
if not util.check_result(result):
return False, result.get('message', 'UNKNOWN ERROR')
# Parse
values = util.response_list(result, 'Data')
return True, [emtype.PoiDetails(**a) for a in values] | Obtain detailed info of a given POI.
Args:
family (str): Family code of the POI (3 chars).
lang (str): Language code (*es* or *en*).
id (int): Optional, ID of the POI to query. Passing value -1 will
result in information from all POIs.
Returns:
Status boolean and parsed response (list[PoiDetails]), or
message string in case of error. | Below is the the instruction that describes the task:
### Input:
Obtain detailed info of a given POI.
Args:
family (str): Family code of the POI (3 chars).
lang (str): Language code (*es* or *en*).
id (int): Optional, ID of the POI to query. Passing value -1 will
result in information from all POIs.
Returns:
Status boolean and parsed response (list[PoiDetails]), or
message string in case of error.
### Response:
def detail_poi(self, **kwargs):
"""Obtain detailed info of a given POI.
Args:
family (str): Family code of the POI (3 chars).
lang (str): Language code (*es* or *en*).
id (int): Optional, ID of the POI to query. Passing value -1 will
result in information from all POIs.
Returns:
Status boolean and parsed response (list[PoiDetails]), or
message string in case of error.
"""
# Endpoint parameters
params = {
'language': util.language_code(kwargs.get('lang')),
'family': kwargs.get('family')
}
if kwargs.get('id'):
params['id'] = kwargs['id']
# Request
result = self.make_request('detail_poi', {}, **params)
if not util.check_result(result):
return False, result.get('message', 'UNKNOWN ERROR')
# Parse
values = util.response_list(result, 'Data')
return True, [emtype.PoiDetails(**a) for a in values] |
def cited_names_from_aux_file(stream):
"""Parse a LaTeX ".aux" file and generate a list of names cited according to
LaTeX ``\\citation`` commands. Repeated names are generated only once. The
argument should be a opened I/O stream.
"""
cited = set()
for line in stream:
if not line.startswith(r'\citation{'):
continue
line = line.rstrip()
if line[-1] != '}':
continue # should issue a warning or something
entries = line[10:-1]
for name in entries.split(','):
name = name.strip()
if name not in cited:
yield name
cited.add(name) | Parse a LaTeX ".aux" file and generate a list of names cited according to
LaTeX ``\\citation`` commands. Repeated names are generated only once. The
argument should be a opened I/O stream. | Below is the the instruction that describes the task:
### Input:
Parse a LaTeX ".aux" file and generate a list of names cited according to
LaTeX ``\\citation`` commands. Repeated names are generated only once. The
argument should be a opened I/O stream.
### Response:
def cited_names_from_aux_file(stream):
"""Parse a LaTeX ".aux" file and generate a list of names cited according to
LaTeX ``\\citation`` commands. Repeated names are generated only once. The
argument should be a opened I/O stream.
"""
cited = set()
for line in stream:
if not line.startswith(r'\citation{'):
continue
line = line.rstrip()
if line[-1] != '}':
continue # should issue a warning or something
entries = line[10:-1]
for name in entries.split(','):
name = name.strip()
if name not in cited:
yield name
cited.add(name) |
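Because the function only needs an opened stream, it can be exercised without touching disk via io.StringIO; the citation keys below are invented.

import io

aux = io.StringIO(
    "\\relax\n"
    "\\citation{smith2019,jones2020}\n"
    "\\citation{smith2019}\n"
)
print(list(cited_names_from_aux_file(aux)))   # -> ['smith2019', 'jones2020']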
def title(self, value):
"""
Setter for **self.__title** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"title", value)
self.__title = value | Setter for **self.__title** attribute.
:param value: Attribute value.
:type value: unicode | Below is the the instruction that describes the task:
### Input:
Setter for **self.__title** attribute.
:param value: Attribute value.
:type value: unicode
### Response:
def title(self, value):
"""
Setter for **self.__title** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"title", value)
self.__title = value |
def _handle_keypad_message(self, data):
"""
Handle keypad messages.
:param data: keypad message to parse
:type data: string
:returns: :py:class:`~alarmdecoder.messages.Message`
"""
msg = Message(data)
if self._internal_address_mask & msg.mask > 0:
if not self._ignore_message_states:
self._update_internal_states(msg)
self.on_message(message=msg)
return msg | Handle keypad messages.
:param data: keypad message to parse
:type data: string
:returns: :py:class:`~alarmdecoder.messages.Message` | Below is the the instruction that describes the task:
### Input:
Handle keypad messages.
:param data: keypad message to parse
:type data: string
:returns: :py:class:`~alarmdecoder.messages.Message`
### Response:
def _handle_keypad_message(self, data):
"""
Handle keypad messages.
:param data: keypad message to parse
:type data: string
:returns: :py:class:`~alarmdecoder.messages.Message`
"""
msg = Message(data)
if self._internal_address_mask & msg.mask > 0:
if not self._ignore_message_states:
self._update_internal_states(msg)
self.on_message(message=msg)
return msg |
def _clear_strobes(self):
"""
Resets the "enable" and "load" output streams to all 0.
"""
#reset some stuff
self['SEQ']['GLOBAL_SHIFT_EN'].setall(False)
self['SEQ']['GLOBAL_CTR_LD'].setall(False)
self['SEQ']['GLOBAL_DAC_LD'].setall(False)
self['SEQ']['PIXEL_SHIFT_EN'].setall(False)
self['SEQ']['INJECTION'].setall(False) | Resets the "enable" and "load" output streams to all 0. | Below is the the instruction that describes the task:
### Input:
Resets the "enable" and "load" output streams to all 0.
### Response:
def _clear_strobes(self):
"""
Resets the "enable" and "load" output streams to all 0.
"""
#reset some stuff
self['SEQ']['GLOBAL_SHIFT_EN'].setall(False)
self['SEQ']['GLOBAL_CTR_LD'].setall(False)
self['SEQ']['GLOBAL_DAC_LD'].setall(False)
self['SEQ']['PIXEL_SHIFT_EN'].setall(False)
self['SEQ']['INJECTION'].setall(False) |
def pick_deep(pick_dct, dct):
"""
Implementation of pick that recurses. This tests the same keys at every level of dict and in lists
:param pick_dct: Deep dict matching some portion of dct.
    :param dct: Dct to filter. Any key matching pick_dct passes through. It doesn't matter what the pick_dct value
    is as long as the key exists. Arrays also pass through if they have matching values in pick_dct
:return:
"""
if isinstance(dict, dct):
# Filter out keys and then recurse on each value that wasn't filtered out
return map_with_obj(
lambda k, v: pick_deep(prop(k, pick_dct), v),
pick(keys(pick_dct), dct)
)
if isinstance((list, tuple), dct):
# run pick_deep on each value
return map(
lambda tup: pick_deep(*tup),
list(zip(pick_dct or [], dct))
)
# scalar
return dct | Implementation of pick that recurses. This tests the same keys at every level of dict and in lists
:param pick_dct: Deep dict matching some portion of dct.
:param dct: Dct to filter. Any key matching pick_dct passes through. It doesn't matter what the pick_dct value
is as long as the key exists. Arrays also pass through if they have matching values in pick_dct
:return: | Below is the the instruction that describes the task:
### Input:
Implementation of pick that recurses. This tests the same keys at every level of dict and in lists
:param pick_dct: Deep dict matching some portion of dct.
:param dct: Dct to filter. Any key matching pick_dct passes through. It doesn't matter what the pick_dct value
is as long as the key exists. Arrays also pass through if they have matching values in pick_dct
:return:
### Response:
def pick_deep(pick_dct, dct):
"""
Implementation of pick that recurses. This tests the same keys at every level of dict and in lists
:param pick_dct: Deep dict matching some portion of dct.
    :param dct: Dct to filter. Any key matching pick_dct passes through. It doesn't matter what the pick_dct value
    is as long as the key exists. Arrays also pass through if they have matching values in pick_dct
:return:
"""
if isinstance(dict, dct):
# Filter out keys and then recurse on each value that wasn't filtered out
return map_with_obj(
lambda k, v: pick_deep(prop(k, pick_dct), v),
pick(keys(pick_dct), dct)
)
if isinstance((list, tuple), dct):
# run pick_deep on each value
return map(
lambda tup: pick_deep(*tup),
list(zip(pick_dct or [], dct))
)
# scalar
return dct |
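The helpers used above (map_with_obj, prop, pick, keys) and the class-first isinstance calls presumably come from the surrounding ramda-style helper module rather than the standard library. For readers who just want the behaviour the docstring describes, here is a plain-stdlib sketch of the same idea; it is an approximation, not the library function.

def pick_deep_sketch(pick_dct, dct):
    # Plain-dict approximation of the behaviour described in the docstring.
    if isinstance(dct, dict):
        return {k: pick_deep_sketch(pick_dct.get(k), v)
                for k, v in dct.items() if k in pick_dct}
    if isinstance(dct, (list, tuple)):
        return [pick_deep_sketch(p, v) for p, v in zip(pick_dct or [], dct)]
    return dct

print(pick_deep_sketch({'a': None, 'b': {'c': None}},
                       {'a': 1, 'b': {'c': 2, 'd': 3}, 'e': 4}))
# -> {'a': 1, 'b': {'c': 2}}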
def get_language_model(n_tok, emb_sz, n_hid, n_layers, pad_token,
dropout=0.4, dropouth=0.3, dropouti=0.5, dropoute=0.1, wdrop=0.5, tie_weights=True, qrnn=False, bias=False):
"""Returns a SequentialRNN model.
A RNN_Encoder layer is instantiated using the parameters provided.
This is followed by the creation of a LinearDecoder layer.
Also by default (i.e. tie_weights = True), the embedding matrix used in the RNN_Encoder
is used to instantiate the weights for the LinearDecoder layer.
The SequentialRNN layer is the native torch's Sequential wrapper that puts the RNN_Encoder and
LinearDecoder layers sequentially in the model.
Args:
n_tok (int): number of unique vocabulary words (or tokens) in the source dataset
emb_sz (int): the embedding size to use to encode each token
n_hid (int): number of hidden activation per LSTM layer
n_layers (int): number of LSTM layers to use in the architecture
pad_token (int): the int value used for padding text.
dropouth (float): dropout to apply to the activations going from one LSTM layer to another
dropouti (float): dropout to apply to the input layer.
dropoute (float): dropout to apply to the embedding layer.
wdrop (float): dropout used for a LSTM's internal (or hidden) recurrent weights.
tie_weights (bool): decide if the weights of the embedding matrix in the RNN encoder should be tied to the
weights of the LinearDecoder layer.
qrnn (bool): decide if the model is composed of LSTMS (False) or QRNNs (True).
bias (bool): decide if the decoder should have a bias layer or not.
Returns:
A SequentialRNN model
"""
rnn_enc = RNN_Encoder(n_tok, emb_sz, n_hid=n_hid, n_layers=n_layers, pad_token=pad_token,
dropouth=dropouth, dropouti=dropouti, dropoute=dropoute, wdrop=wdrop, qrnn=qrnn)
enc = rnn_enc.encoder if tie_weights else None
return SequentialRNN(rnn_enc, LinearDecoder(n_tok, emb_sz, dropout, tie_encoder=enc, bias=bias)) | Returns a SequentialRNN model.
A RNN_Encoder layer is instantiated using the parameters provided.
This is followed by the creation of a LinearDecoder layer.
Also by default (i.e. tie_weights = True), the embedding matrix used in the RNN_Encoder
is used to instantiate the weights for the LinearDecoder layer.
The SequentialRNN layer is the native torch's Sequential wrapper that puts the RNN_Encoder and
LinearDecoder layers sequentially in the model.
Args:
n_tok (int): number of unique vocabulary words (or tokens) in the source dataset
emb_sz (int): the embedding size to use to encode each token
n_hid (int): number of hidden activation per LSTM layer
n_layers (int): number of LSTM layers to use in the architecture
pad_token (int): the int value used for padding text.
dropouth (float): dropout to apply to the activations going from one LSTM layer to another
dropouti (float): dropout to apply to the input layer.
dropoute (float): dropout to apply to the embedding layer.
wdrop (float): dropout used for a LSTM's internal (or hidden) recurrent weights.
tie_weights (bool): decide if the weights of the embedding matrix in the RNN encoder should be tied to the
weights of the LinearDecoder layer.
qrnn (bool): decide if the model is composed of LSTMS (False) or QRNNs (True).
bias (bool): decide if the decoder should have a bias layer or not.
Returns:
A SequentialRNN model | Below is the the instruction that describes the task:
### Input:
Returns a SequentialRNN model.
A RNN_Encoder layer is instantiated using the parameters provided.
This is followed by the creation of a LinearDecoder layer.
Also by default (i.e. tie_weights = True), the embedding matrix used in the RNN_Encoder
is used to instantiate the weights for the LinearDecoder layer.
The SequentialRNN layer is the native torch's Sequential wrapper that puts the RNN_Encoder and
LinearDecoder layers sequentially in the model.
Args:
n_tok (int): number of unique vocabulary words (or tokens) in the source dataset
emb_sz (int): the embedding size to use to encode each token
n_hid (int): number of hidden activation per LSTM layer
n_layers (int): number of LSTM layers to use in the architecture
pad_token (int): the int value used for padding text.
dropouth (float): dropout to apply to the activations going from one LSTM layer to another
dropouti (float): dropout to apply to the input layer.
dropoute (float): dropout to apply to the embedding layer.
wdrop (float): dropout used for a LSTM's internal (or hidden) recurrent weights.
tie_weights (bool): decide if the weights of the embedding matrix in the RNN encoder should be tied to the
weights of the LinearDecoder layer.
qrnn (bool): decide if the model is composed of LSTMS (False) or QRNNs (True).
bias (bool): decide if the decoder should have a bias layer or not.
Returns:
A SequentialRNN model
### Response:
def get_language_model(n_tok, emb_sz, n_hid, n_layers, pad_token,
dropout=0.4, dropouth=0.3, dropouti=0.5, dropoute=0.1, wdrop=0.5, tie_weights=True, qrnn=False, bias=False):
"""Returns a SequentialRNN model.
A RNN_Encoder layer is instantiated using the parameters provided.
This is followed by the creation of a LinearDecoder layer.
Also by default (i.e. tie_weights = True), the embedding matrix used in the RNN_Encoder
is used to instantiate the weights for the LinearDecoder layer.
The SequentialRNN layer is the native torch's Sequential wrapper that puts the RNN_Encoder and
LinearDecoder layers sequentially in the model.
Args:
n_tok (int): number of unique vocabulary words (or tokens) in the source dataset
emb_sz (int): the embedding size to use to encode each token
n_hid (int): number of hidden activation per LSTM layer
n_layers (int): number of LSTM layers to use in the architecture
pad_token (int): the int value used for padding text.
dropouth (float): dropout to apply to the activations going from one LSTM layer to another
dropouti (float): dropout to apply to the input layer.
dropoute (float): dropout to apply to the embedding layer.
wdrop (float): dropout used for a LSTM's internal (or hidden) recurrent weights.
tie_weights (bool): decide if the weights of the embedding matrix in the RNN encoder should be tied to the
weights of the LinearDecoder layer.
qrnn (bool): decide if the model is composed of LSTMS (False) or QRNNs (True).
bias (bool): decide if the decoder should have a bias layer or not.
Returns:
A SequentialRNN model
"""
rnn_enc = RNN_Encoder(n_tok, emb_sz, n_hid=n_hid, n_layers=n_layers, pad_token=pad_token,
dropouth=dropouth, dropouti=dropouti, dropoute=dropoute, wdrop=wdrop, qrnn=qrnn)
enc = rnn_enc.encoder if tie_weights else None
return SequentialRNN(rnn_enc, LinearDecoder(n_tok, emb_sz, dropout, tie_encoder=enc, bias=bias)) |
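A hedged construction example with AWD-LSTM-flavoured hyperparameters; the numbers are illustrative choices, not values mandated by this function.

# Hypothetical hyperparameters; tune to your corpus and GPU memory.
model = get_language_model(
    n_tok=30002,        # vocabulary size, including special tokens
    emb_sz=400,
    n_hid=1150,
    n_layers=3,
    pad_token=1,
    dropout=0.4, dropouth=0.3, dropouti=0.6, dropoute=0.1, wdrop=0.5,
    tie_weights=True,
)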
def _displayFeatures(self, fig, features, minX, maxX, offsetAdjuster):
"""
Add the given C{features} to the figure in C{fig}.
@param fig: A matplotlib figure.
@param features: A C{FeatureList} instance.
@param minX: The smallest x coordinate.
@param maxX: The largest x coordinate.
@param offsetAdjuster: a function for adjusting feature X axis offsets
for plotting.
"""
labels = []
for index, feature in enumerate(features):
fig.plot([offsetAdjuster(feature.start),
offsetAdjuster(feature.end)],
[index * -0.2, index * -0.2], color=feature.color,
linewidth=2)
labels.append(feature.legendLabel())
# Note that minX and maxX do not need to be adjusted by the offset
# adjuster. They are the already-adjusted min/max values as
# computed in computePlotInfo in blast.py
fig.axis([minX, maxX, (len(features) + 1) * -0.2, 0.2])
if labels:
# Put a legend above the figure.
box = fig.get_position()
fig.set_position([box.x0, box.y0,
box.width, box.height * 0.2])
fig.legend(labels, loc='lower center', bbox_to_anchor=(0.5, 1.4),
fancybox=True, shadow=True, ncol=2) | Add the given C{features} to the figure in C{fig}.
@param fig: A matplotlib figure.
@param features: A C{FeatureList} instance.
@param minX: The smallest x coordinate.
@param maxX: The largest x coordinate.
@param offsetAdjuster: a function for adjusting feature X axis offsets
for plotting. | Below is the the instruction that describes the task:
### Input:
Add the given C{features} to the figure in C{fig}.
@param fig: A matplotlib figure.
@param features: A C{FeatureList} instance.
@param minX: The smallest x coordinate.
@param maxX: The largest x coordinate.
@param offsetAdjuster: a function for adjusting feature X axis offsets
for plotting.
### Response:
def _displayFeatures(self, fig, features, minX, maxX, offsetAdjuster):
"""
Add the given C{features} to the figure in C{fig}.
@param fig: A matplotlib figure.
@param features: A C{FeatureList} instance.
@param minX: The smallest x coordinate.
@param maxX: The largest x coordinate.
@param offsetAdjuster: a function for adjusting feature X axis offsets
for plotting.
"""
labels = []
for index, feature in enumerate(features):
fig.plot([offsetAdjuster(feature.start),
offsetAdjuster(feature.end)],
[index * -0.2, index * -0.2], color=feature.color,
linewidth=2)
labels.append(feature.legendLabel())
# Note that minX and maxX do not need to be adjusted by the offset
# adjuster. They are the already-adjusted min/max values as
# computed in computePlotInfo in blast.py
fig.axis([minX, maxX, (len(features) + 1) * -0.2, 0.2])
if labels:
# Put a legend above the figure.
box = fig.get_position()
fig.set_position([box.x0, box.y0,
box.width, box.height * 0.2])
fig.legend(labels, loc='lower center', bbox_to_anchor=(0.5, 1.4),
fancybox=True, shadow=True, ncol=2) |
def p_generate(self, p):
'generate : GENERATE generate_items ENDGENERATE'
p[0] = GenerateStatement(p[2], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | generate : GENERATE generate_items ENDGENERATE | Below is the the instruction that describes the task:
### Input:
generate : GENERATE generate_items ENDGENERATE
### Response:
def p_generate(self, p):
'generate : GENERATE generate_items ENDGENERATE'
p[0] = GenerateStatement(p[2], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) |
def filter(self, limit=None, to=None, category=None):
"""
Returns the events that match the filters
Args:
limit (int, optional): the max length of the events to return (Default value = None)
to (str, optional): only events that have been sent or received by 'to' (Default value = None)
category (str, optional): only events belonging to the category (Default value = None)
Returns:
list: a list of filtered events
"""
if category and not to:
msg_slice = itertools.islice((x for x in self.store if x[2] == category), limit)
elif to and not category:
to = JID.fromstr(to)
msg_slice = itertools.islice((x for x in self.store if _agent_in_msg(to, x[1])), limit)
elif to and category:
to = JID.fromstr(to)
msg_slice = itertools.islice((x for x in self.store if _agent_in_msg(to, x[1]) and x[2] == category), limit)
else:
msg_slice = self.all(limit=limit)
return msg_slice
return list(msg_slice)[::-1] | Returns the events that match the filters
Args:
limit (int, optional): the max length of the events to return (Default value = None)
to (str, optional): only events that have been sent or received by 'to' (Default value = None)
category (str, optional): only events belonging to the category (Default value = None)
Returns:
list: a list of filtered events | Below is the the instruction that describes the task:
### Input:
Returns the events that match the filters
Args:
limit (int, optional): the max length of the events to return (Default value = None)
to (str, optional): only events that have been sent or received by 'to' (Default value = None)
category (str, optional): only events belonging to the category (Default value = None)
Returns:
list: a list of filtered events
### Response:
def filter(self, limit=None, to=None, category=None):
"""
Returns the events that match the filters
Args:
limit (int, optional): the max length of the events to return (Default value = None)
to (str, optional): only events that have been sent or received by 'to' (Default value = None)
category (str, optional): only events belonging to the category (Default value = None)
Returns:
list: a list of filtered events
"""
if category and not to:
msg_slice = itertools.islice((x for x in self.store if x[2] == category), limit)
elif to and not category:
to = JID.fromstr(to)
msg_slice = itertools.islice((x for x in self.store if _agent_in_msg(to, x[1])), limit)
elif to and category:
to = JID.fromstr(to)
msg_slice = itertools.islice((x for x in self.store if _agent_in_msg(to, x[1]) and x[2] == category), limit)
else:
msg_slice = self.all(limit=limit)
return msg_slice
return list(msg_slice)[::-1] |
def get(protocol, subset, classes=CLASSES, variables=VARIABLES):
'''Returns the data subset given a particular protocol
Parameters
protocol (string): one of the valid protocols supported by this interface
subset (string): one of 'train' or 'test'
classes (list of string): a list of strings containing the names of the
classes from which you want to have the data from
  variables (list of strings): a list of strings containing the names of the
variables (features) you want to have data from
Returns:
data (numpy.ndarray): The data for all the classes and variables nicely
packed into one numpy 3D array. One depth represents the data for one
class, one row is one example, one column a given feature.
'''
retval = split_data(bob.db.iris.data(), subset, PROTOCOLS[protocol])
# filter variables (features)
varindex = [VARIABLES.index(k) for k in variables]
# filter class names and variable indexes at the same time
retval = dict([(k, retval[k][:,varindex]) for k in classes])
# squash the data
return numpy.array([retval[k] for k in classes]) | Returns the data subset given a particular protocol
Parameters
protocol (string): one of the valid protocols supported by this interface
subset (string): one of 'train' or 'test'
classes (list of string): a list of strings containing the names of the
classes from which you want to have the data from
variables (list of strings): a list of strings containing the names of the
variables (features) you want to have data from
Returns:
data (numpy.ndarray): The data for all the classes and variables nicely
packed into one numpy 3D array. One depth represents the data for one
class, one row is one example, one column a given feature. | Below is the the instruction that describes the task:
### Input:
Returns the data subset given a particular protocol
Parameters
protocol (string): one of the valid protocols supported by this interface
subset (string): one of 'train' or 'test'
classes (list of string): a list of strings containing the names of the
classes from which you want to have the data from
variables (list of strings): a list of strings containing the names of the
variables (features) you want to have data from
Returns:
data (numpy.ndarray): The data for all the classes and variables nicely
packed into one numpy 3D array. One depth represents the data for one
class, one row is one example, one column a given feature.
### Response:
def get(protocol, subset, classes=CLASSES, variables=VARIABLES):
'''Returns the data subset given a particular protocol
Parameters
protocol (string): one of the valid protocols supported by this interface
subset (string): one of 'train' or 'test'
classes (list of string): a list of strings containing the names of the
classes from which you want to have the data from
  variables (list of strings): a list of strings containing the names of the
variables (features) you want to have data from
Returns:
data (numpy.ndarray): The data for all the classes and variables nicely
packed into one numpy 3D array. One depth represents the data for one
class, one row is one example, one column a given feature.
'''
retval = split_data(bob.db.iris.data(), subset, PROTOCOLS[protocol])
# filter variables (features)
varindex = [VARIABLES.index(k) for k in variables]
# filter class names and variable indexes at the same time
retval = dict([(k, retval[k][:,varindex]) for k in classes])
# squash the data
return numpy.array([retval[k] for k in classes]) |
def sliceit(iterable, lower=0, upper=None):
"""Apply a slice on input iterable.
:param iterable: object which provides the method __getitem__ or __iter__.
:param int lower: lower bound from where start to get items.
:param int upper: upper bound from where finish to get items.
:return: sliced object of the same type of iterable if not dict, or specific
object. otherwise, simple list of sliced items.
:rtype: Iterable
"""
if upper is None:
upper = len(iterable)
try:
result = iterable[lower: upper]
except TypeError: # if iterable does not implement the slice method
result = []
if lower < 0: # ensure lower is positive
lower += len(iterable)
if upper < 0: # ensure upper is positive
upper += len(iterable)
if upper > lower:
iterator = iter(iterable)
for index in range(upper):
try:
value = next(iterator)
except StopIteration:
break
else:
if index >= lower:
result.append(value)
iterablecls = iterable.__class__
if not(isinstance(result, iterablecls) or issubclass(iterablecls, dict)):
try:
result = iterablecls(result)
except TypeError:
pass
return result | Apply a slice on input iterable.
:param iterable: object which provides the method __getitem__ or __iter__.
:param int lower: lower bound from where start to get items.
:param int upper: upper bound from where finish to get items.
:return: sliced object of the same type of iterable if not dict, or specific
object. otherwise, simple list of sliced items.
:rtype: Iterable | Below is the the instruction that describes the task:
### Input:
Apply a slice on input iterable.
:param iterable: object which provides the method __getitem__ or __iter__.
:param int lower: lower bound from where start to get items.
:param int upper: upper bound from where finish to get items.
:return: sliced object of the same type of iterable if not dict, or specific
object. otherwise, simple list of sliced items.
:rtype: Iterable
### Response:
def sliceit(iterable, lower=0, upper=None):
"""Apply a slice on input iterable.
:param iterable: object which provides the method __getitem__ or __iter__.
:param int lower: lower bound from where start to get items.
:param int upper: upper bound from where finish to get items.
:return: sliced object of the same type of iterable if not dict, or specific
object. otherwise, simple list of sliced items.
:rtype: Iterable
"""
if upper is None:
upper = len(iterable)
try:
result = iterable[lower: upper]
except TypeError: # if iterable does not implement the slice method
result = []
if lower < 0: # ensure lower is positive
lower += len(iterable)
if upper < 0: # ensure upper is positive
upper += len(iterable)
if upper > lower:
iterator = iter(iterable)
for index in range(upper):
try:
value = next(iterator)
except StopIteration:
break
else:
if index >= lower:
result.append(value)
iterablecls = iterable.__class__
if not(isinstance(result, iterablecls) or issubclass(iterablecls, dict)):
try:
result = iterablecls(result)
except TypeError:
pass
return result |
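A few behavioural examples that follow directly from the code above; the manual negative-bound handling is only reached for objects that do not support slicing themselves.

print(sliceit([10, 20, 30, 40, 50], 1, 4))    # -> [20, 30, 40] (native slice path)
print(sliceit([10, 20, 30, 40, 50], -3, -1))  # -> [30, 40]
print(sliceit("abcdef", 2))                   # -> "cdef"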
def planck(wave, temp, wavelength=True):
"""The Planck radiation or Blackbody radiation as a function of wavelength
or wavenumber. SI units.
_planck(wave, temperature, wavelength=True)
wave = Wavelength/wavenumber or a sequence of wavelengths/wavenumbers (m or m^-1)
temp = Temperature (scalar) or a sequence of temperatures (K)
Output: Wavelength space: The spectral radiance per meter (not micron!)
Unit = W/m^2 sr^-1 m^-1
Wavenumber space: The spectral radiance in Watts per square meter
per steradian per m-1:
Unit = W/m^2 sr^-1 (m^-1)^-1 = W/m sr^-1
Converting from SI units to mW/m^2 sr^-1 (cm^-1)^-1:
1.0 W/m^2 sr^-1 (m^-1)^-1 = 0.1 mW/m^2 sr^-1 (cm^-1)^-1
"""
units = ['wavelengths', 'wavenumbers']
if wavelength:
LOG.debug("Using {0} when calculating the Blackbody radiance".format(
units[(wavelength == True) - 1]))
if np.isscalar(temp):
temperature = np.array([temp, ], dtype='float64')
else:
temperature = np.array(temp, dtype='float64')
shape = temperature.shape
if np.isscalar(wave):
wln = np.array([wave, ], dtype='float64')
else:
wln = np.array(wave, dtype='float64')
if wavelength:
const = 2 * H_PLANCK * C_SPEED ** 2
nom = const / wln ** 5
arg1 = H_PLANCK * C_SPEED / (K_BOLTZMANN * wln)
else:
nom = 2 * H_PLANCK * (C_SPEED ** 2) * (wln ** 3)
arg1 = H_PLANCK * C_SPEED * wln / K_BOLTZMANN
arg2 = np.where(np.greater(np.abs(temperature), EPSILON),
np.array(1. / temperature), -9).reshape(-1, 1)
arg2 = np.ma.masked_array(arg2, mask=arg2 == -9)
LOG.debug("Max and min - arg1: %s %s", str(arg1.max()), str(arg1.min()))
LOG.debug("Max and min - arg2: %s %s", str(arg2.max()), str(arg2.min()))
try:
exp_arg = np.multiply(arg1.astype('float32'), arg2.astype('float32'))
except MemoryError:
LOG.warning(("Dimensions used in numpy.multiply probably reached "
"limit!\n"
"Make sure the Radiance<->Tb table has been created "
"and try running again"))
raise
LOG.debug("Max and min before exp: %s %s", str(exp_arg.max()),
str(exp_arg.min()))
if exp_arg.min() < 0:
LOG.warning("Something is fishy: \n" +
"\tDenominator might be zero or negative in radiance derivation:")
dubious = np.where(exp_arg < 0)[0]
LOG.warning(
"Number of items having dubious values: " + str(dubious.shape[0]))
denom = np.exp(exp_arg) - 1
rad = nom / denom
rad = np.where(rad.mask, np.nan, rad.data)
radshape = rad.shape
if wln.shape[0] == 1:
if temperature.shape[0] == 1:
return rad[0, 0]
else:
return rad[:, 0].reshape(shape)
else:
if temperature.shape[0] == 1:
return rad[0, :]
else:
if len(shape) == 1:
return np.reshape(rad, (shape[0], radshape[1]))
else:
return np.reshape(rad, (shape[0], shape[1], radshape[1])) | The Planck radiation or Blackbody radiation as a function of wavelength
or wavenumber. SI units.
_planck(wave, temperature, wavelength=True)
wave = Wavelength/wavenumber or a sequence of wavelengths/wavenumbers (m or m^-1)
temp = Temperature (scalar) or a sequence of temperatures (K)
Output: Wavelength space: The spectral radiance per meter (not micron!)
Unit = W/m^2 sr^-1 m^-1
Wavenumber space: The spectral radiance in Watts per square meter
per steradian per m-1:
Unit = W/m^2 sr^-1 (m^-1)^-1 = W/m sr^-1
Converting from SI units to mW/m^2 sr^-1 (cm^-1)^-1:
1.0 W/m^2 sr^-1 (m^-1)^-1 = 0.1 mW/m^2 sr^-1 (cm^-1)^-1 | Below is the the instruction that describes the task:
### Input:
The Planck radiation or Blackbody radiation as a function of wavelength
or wavenumber. SI units.
_planck(wave, temperature, wavelength=True)
wave = Wavelength/wavenumber or a sequence of wavelengths/wavenumbers (m or m^-1)
temp = Temperature (scalar) or a sequence of temperatures (K)
Output: Wavelength space: The spectral radiance per meter (not micron!)
Unit = W/m^2 sr^-1 m^-1
Wavenumber space: The spectral radiance in Watts per square meter
per steradian per m-1:
Unit = W/m^2 sr^-1 (m^-1)^-1 = W/m sr^-1
Converting from SI units to mW/m^2 sr^-1 (cm^-1)^-1:
1.0 W/m^2 sr^-1 (m^-1)^-1 = 0.1 mW/m^2 sr^-1 (cm^-1)^-1
### Response:
def planck(wave, temp, wavelength=True):
"""The Planck radiation or Blackbody radiation as a function of wavelength
or wavenumber. SI units.
_planck(wave, temperature, wavelength=True)
wave = Wavelength/wavenumber or a sequence of wavelengths/wavenumbers (m or m^-1)
temp = Temperature (scalar) or a sequence of temperatures (K)
Output: Wavelength space: The spectral radiance per meter (not micron!)
Unit = W/m^2 sr^-1 m^-1
Wavenumber space: The spectral radiance in Watts per square meter
per steradian per m-1:
Unit = W/m^2 sr^-1 (m^-1)^-1 = W/m sr^-1
Converting from SI units to mW/m^2 sr^-1 (cm^-1)^-1:
1.0 W/m^2 sr^-1 (m^-1)^-1 = 0.1 mW/m^2 sr^-1 (cm^-1)^-1
"""
units = ['wavelengths', 'wavenumbers']
if wavelength:
LOG.debug("Using {0} when calculating the Blackbody radiance".format(
units[(wavelength == True) - 1]))
if np.isscalar(temp):
temperature = np.array([temp, ], dtype='float64')
else:
temperature = np.array(temp, dtype='float64')
shape = temperature.shape
if np.isscalar(wave):
wln = np.array([wave, ], dtype='float64')
else:
wln = np.array(wave, dtype='float64')
if wavelength:
const = 2 * H_PLANCK * C_SPEED ** 2
nom = const / wln ** 5
arg1 = H_PLANCK * C_SPEED / (K_BOLTZMANN * wln)
else:
nom = 2 * H_PLANCK * (C_SPEED ** 2) * (wln ** 3)
arg1 = H_PLANCK * C_SPEED * wln / K_BOLTZMANN
arg2 = np.where(np.greater(np.abs(temperature), EPSILON),
np.array(1. / temperature), -9).reshape(-1, 1)
arg2 = np.ma.masked_array(arg2, mask=arg2 == -9)
LOG.debug("Max and min - arg1: %s %s", str(arg1.max()), str(arg1.min()))
LOG.debug("Max and min - arg2: %s %s", str(arg2.max()), str(arg2.min()))
try:
exp_arg = np.multiply(arg1.astype('float32'), arg2.astype('float32'))
except MemoryError:
LOG.warning(("Dimensions used in numpy.multiply probably reached "
"limit!\n"
"Make sure the Radiance<->Tb table has been created "
"and try running again"))
raise
LOG.debug("Max and min before exp: %s %s", str(exp_arg.max()),
str(exp_arg.min()))
if exp_arg.min() < 0:
LOG.warning("Something is fishy: \n" +
"\tDenominator might be zero or negative in radiance derivation:")
dubious = np.where(exp_arg < 0)[0]
LOG.warning(
"Number of items having dubious values: " + str(dubious.shape[0]))
denom = np.exp(exp_arg) - 1
rad = nom / denom
rad = np.where(rad.mask, np.nan, rad.data)
radshape = rad.shape
if wln.shape[0] == 1:
if temperature.shape[0] == 1:
return rad[0, 0]
else:
return rad[:, 0].reshape(shape)
else:
if temperature.shape[0] == 1:
return rad[0, :]
else:
if len(shape) == 1:
return np.reshape(rad, (shape[0], radshape[1]))
else:
return np.reshape(rad, (shape[0], shape[1], radshape[1])) |
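A usage sketch with hedged numbers: at 11 µm and 300 K the wavelength form returns a spectral radiance on the order of 1e7 W m-2 sr-1 m-1 (multiply by 1e-6 for the per-micron value), and the wavenumber form takes m-1, so a 900 cm-1 channel is passed as 90000.

rad = planck(11e-6, 300.0)                                 # W m-2 sr-1 m-1, roughly 1e7 here
rad_per_micron = rad * 1e-6                                # convert to W m-2 sr-1 um-1
rad_wn = planck(900.0 * 100.0, 300.0, wavelength=False)    # 900 cm-1 expressed as 90000 m-1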
def post_gist(report_data, old_sha, new_sha):
"""Post the report to a GitHub Gist and return the URL of the gist."""
payload = {
"description": ("Changes in OpenStack-Ansible between "
"{0} and {1}".format(old_sha, new_sha)),
"public": True,
"files": {
"osa-diff-{0}-{1}.rst".format(old_sha, new_sha): {
"content": report_data
}
}
}
url = "https://api.github.com/gists"
r = requests.post(url, data=json.dumps(payload))
response = r.json()
return response['html_url'] | Post the report to a GitHub Gist and return the URL of the gist. | Below is the the instruction that describes the task:
### Input:
Post the report to a GitHub Gist and return the URL of the gist.
### Response:
def post_gist(report_data, old_sha, new_sha):
"""Post the report to a GitHub Gist and return the URL of the gist."""
payload = {
"description": ("Changes in OpenStack-Ansible between "
"{0} and {1}".format(old_sha, new_sha)),
"public": True,
"files": {
"osa-diff-{0}-{1}.rst".format(old_sha, new_sha): {
"content": report_data
}
}
}
url = "https://api.github.com/gists"
r = requests.post(url, data=json.dumps(payload))
response = r.json()
return response['html_url'] |
def validate(repo,
validator_name=None,
filename=None,
rulesfiles=None,
args=[]):
"""
Validate the content of the files for consistency. Validators can
look as deeply as needed into the files. dgit treats them all as
black boxes.
Parameters
----------
repo: Repository object
validator_name: Name of validator, if any. If none, then all validators specified in dgit.json will be included.
filename: Pattern that specifies files that must be processed by the validators selected. If none, then the default specification in dgit.json is used.
rules: Pattern specifying the files that have rules that validators will use
show: Print the validation results on the terminal
Returns
-------
status: A list of dictionaries, each with target file processed, rules file applied, status of the validation and any error message.
"""
mgr = plugins_get_mgr()
# Expand the specification. Now we have full file paths
validator_specs = instantiate(repo, validator_name, filename, rulesfiles)
# Run the validators with rules files...
allresults = []
for v in validator_specs:
keys = mgr.search(what='validator',name=v)['validator']
for k in keys:
validator = mgr.get_by_key('validator', k)
result = validator.evaluate(repo,
validator_specs[v],
args)
allresults.extend(result)
return allresults | Validate the content of the files for consistency. Validators can
look as deeply as needed into the files. dgit treats them all as
black boxes.
Parameters
----------
repo: Repository object
validator_name: Name of validator, if any. If none, then all validators specified in dgit.json will be included.
filename: Pattern that specifies files that must be processed by the validators selected. If none, then the default specification in dgit.json is used.
rules: Pattern specifying the files that have rules that validators will use
show: Print the validation results on the terminal
Returns
-------
status: A list of dictionaries, each with target file processed, rules file applied, status of the validation and any error message. | Below is the the instruction that describes the task:
### Input:
Validate the content of the files for consistency. Validators can
look as deeply as needed into the files. dgit treats them all as
black boxes.
Parameters
----------
repo: Repository object
validator_name: Name of validator, if any. If none, then all validators specified in dgit.json will be included.
filename: Pattern that specifies files that must be processed by the validators selected. If none, then the default specification in dgit.json is used.
rules: Pattern specifying the files that have rules that validators will use
show: Print the validation results on the terminal
Returns
-------
status: A list of dictionaries, each with target file processed, rules file applied, status of the validation and any error message.
### Response:
def validate(repo,
validator_name=None,
filename=None,
rulesfiles=None,
args=[]):
"""
Validate the content of the files for consistency. Validators can
look as deeply as needed into the files. dgit treats them all as
black boxes.
Parameters
----------
repo: Repository object
validator_name: Name of validator, if any. If none, then all validators specified in dgit.json will be included.
filename: Pattern that specifies files that must be processed by the validators selected. If none, then the default specification in dgit.json is used.
rules: Pattern specifying the files that have rules that validators will use
show: Print the validation results on the terminal
Returns
-------
status: A list of dictionaries, each with target file processed, rules file applied, status of the validation and any error message.
"""
mgr = plugins_get_mgr()
# Expand the specification. Now we have full file paths
validator_specs = instantiate(repo, validator_name, filename, rulesfiles)
# Run the validators with rules files...
allresults = []
for v in validator_specs:
keys = mgr.search(what='validator',name=v)['validator']
for k in keys:
validator = mgr.get_by_key('validator', k)
result = validator.evaluate(repo,
validator_specs[v],
args)
allresults.extend(result)
return allresults |
def get_long_description():
"""Transform README.md into a usable long description.
Replaces relative references to svg images to absolute https references.
"""
with open('README.md') as f:
read_me = f.read()
def replace_relative_with_absolute(match):
svg_path = match.group(0)[1:-1]
return ('(https://github.com/google/pybadges/raw/master/'
'%s?sanitize=true)' % svg_path)
return re.sub(r'\(tests/golden-images/.*?\.svg\)',
replace_relative_with_absolute,
read_me) | Transform README.md into a usable long description.
Replaces relative references to svg images to absolute https references. | Below is the the instruction that describes the task:
### Input:
Transform README.md into a usable long description.
Replaces relative references to svg images to absolute https references.
### Response:
def get_long_description():
"""Transform README.md into a usable long description.
Replaces relative references to svg images to absolute https references.
"""
with open('README.md') as f:
read_me = f.read()
def replace_relative_with_absolute(match):
svg_path = match.group(0)[1:-1]
return ('(https://github.com/google/pybadges/raw/master/'
'%s?sanitize=true)' % svg_path)
return re.sub(r'\(tests/golden-images/.*?\.svg\)',
replace_relative_with_absolute,
read_me) |
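The same regex rewrite can be seen in isolation; the badge path below is invented but matches the tests/golden-images pattern the function targets.

import re

sample = "![build](tests/golden-images/build-passing.svg)"
fixed = re.sub(
    r'\(tests/golden-images/.*?\.svg\)',
    lambda m: '(https://github.com/google/pybadges/raw/master/'
              '%s?sanitize=true)' % m.group(0)[1:-1],
    sample)
print(fixed)
# -> ![build](https://github.com/google/pybadges/raw/master/tests/golden-images/build-passing.svg?sanitize=true)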
def _deduce_security(kwargs) -> nmcli.SECURITY_TYPES:
""" Make sure that the security_type is known, or throw. """
# Security should be one of our valid strings
sec_translation = {
'wpa-psk': nmcli.SECURITY_TYPES.WPA_PSK,
'none': nmcli.SECURITY_TYPES.NONE,
'wpa-eap': nmcli.SECURITY_TYPES.WPA_EAP,
}
if not kwargs.get('securityType'):
if kwargs.get('psk') and kwargs.get('eapConfig'):
raise ConfigureArgsError(
'Cannot deduce security type: psk and eap both passed')
elif kwargs.get('psk'):
kwargs['securityType'] = 'wpa-psk'
elif kwargs.get('eapConfig'):
kwargs['securityType'] = 'wpa-eap'
else:
kwargs['securityType'] = 'none'
try:
return sec_translation[kwargs['securityType']]
except KeyError:
raise ConfigureArgsError('securityType must be one of {}'
.format(','.join(sec_translation.keys()))) | Make sure that the security_type is known, or throw. | Below is the the instruction that describes the task:
### Input:
Make sure that the security_type is known, or throw.
### Response:
def _deduce_security(kwargs) -> nmcli.SECURITY_TYPES:
""" Make sure that the security_type is known, or throw. """
# Security should be one of our valid strings
sec_translation = {
'wpa-psk': nmcli.SECURITY_TYPES.WPA_PSK,
'none': nmcli.SECURITY_TYPES.NONE,
'wpa-eap': nmcli.SECURITY_TYPES.WPA_EAP,
}
if not kwargs.get('securityType'):
if kwargs.get('psk') and kwargs.get('eapConfig'):
raise ConfigureArgsError(
'Cannot deduce security type: psk and eap both passed')
elif kwargs.get('psk'):
kwargs['securityType'] = 'wpa-psk'
elif kwargs.get('eapConfig'):
kwargs['securityType'] = 'wpa-eap'
else:
kwargs['securityType'] = 'none'
try:
return sec_translation[kwargs['securityType']]
except KeyError:
raise ConfigureArgsError('securityType must be one of {}'
.format(','.join(sec_translation.keys()))) |
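The deduction rules in short, plus a hedged call; this assumes the surrounding nmcli module is importable, and the ssid/psk values are placeholders.

# psk only        -> SECURITY_TYPES.WPA_PSK
# eapConfig only  -> SECURITY_TYPES.WPA_EAP
# neither         -> SECURITY_TYPES.NONE
# both            -> ConfigureArgsError
sec = _deduce_security({'ssid': 'lab-net', 'psk': 'correct horse battery staple'})
assert sec == nmcli.SECURITY_TYPES.WPA_PSK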