content (stringlengths 35 to 762k) | sha1 (stringlengths 40 to 40) | id (int64 0 to 3.66M)
---|---|---|
def spf_record_for(hostname, bypass_cache=True):
"""Retrieves SPF record for a given hostname.
According to the standard, a domain must not have multiple SPF records,
so if that is the case an empty string is returned.
"""
try:
primary_ns = None
if bypass_cache:
primary_ns = get_primary_nameserver(hostname)
txt_records = query_dns(hostname, 'txt', primary_ns)
spf_records = [r for r in txt_records if r.strip().startswith('v=spf')]
if len(spf_records) == 1:
return spf_records[0]
except Exception as e:
log.exception(e)
return '' | 5013285bb291a22d8ce2565dd605b16b9c730bba | 9,198 |
def get_duplicated_members(first_name, last_name):
"""同じ名前の持つメンバーが存在するかどうか
:param first_name:
:param last_name:
:return:
"""
first_name = first_name.strip() if first_name else None
last_name = last_name.strip() if last_name else None
queryset = models.Member.objects.filter(
first_name=first_name,
last_name=last_name,
)
return queryset | f7160d1a710b123e62a5e0ebf0d4e973303f4c2b | 9,200 |
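A brief usage sketch (the names are hypothetical, and it assumes the surrounding Django project where models.Member is defined); the returned queryset can simply be checked with .exists():

# Hypothetical call: a non-empty queryset means a member with this name already exists.
duplicates = get_duplicated_members("Taro", "Yamada")
print("duplicate member exists:", duplicates.exists())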
import logging
def get_oauth_id():
"""Returns user email ID if OAUTH token present, or None."""
try:
user_email = oauth.get_current_user(SCOPE).email()
except oauth.Error as e:
user_email = None
logging.error('OAuth failure: {}'.format(e))
return user_email | d16a1785cf3cfd12f57ab3bbc1fd5318bc634dd2 | 9,201 |
import re
def check_for_publication(form, formsets, user_data):
"""
Run additional validation across forms fields for status LILACS-Express and LILACS
"""
valid = valid_descriptor = valid_url = True
# regex match starts with S (Serial) and ends with (as) analytic
regex_sas = r"^S.*as$"
Sas_record = re.search(regex_sas, form.document_type)
status = form.cleaned_data.get('status')
user_role = user_data['service_role'].get('LILDBI')
# for LILACS status and not Serie Source is required at least one primary descriptor
if status == 1 and form.document_type != 'S':
valid_descriptor = check_descriptor(form, formsets['descriptor'])
# for LILACS indexed check url/fulltext/page
if form.is_LILACS and status != -1:
# for journal article (Sas record) check for electronic_address OR fulltext file #159
if Sas_record:
valid_url = check_url_or_attachment(form, formsets['attachment'])
elif form.document_type != 'S' and form.document_type != 'Mc':
# for other types of analytic records check for page or electronic_address #160
valid_url = check_url_or_page(form, formsets['attachment'])
if not valid_descriptor or not valid_url:
valid = False
return valid | c36364c75f97c7eb299611471df1ff29e9837bfd | 9,202 |
def _generate_var_name(prefix, field_name):
"""
Generate the environment variable name, given a prefix
and the configuration field name.
Examples:
>>> _generate_var_name("", "some_var")
"SOME_VAR"
>>> _generate_var_name("my_app", "some_var")
"MY_APP_SOME_VAR"
:param prefix: the prefix to be used, can be empty
:param field_name: the name of the field from which the variable is derived
"""
return (
"_".join((prefix, field_name)).upper()
if prefix
else field_name.upper()
) | 9065d1deb76789582e68df779ec2c961a7d4aedc | 9,203 |
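A small usage sketch showing how such a name would typically be looked up in the environment (the prefix and field name here are hypothetical):

import os

# "my_app" + "timeout" -> "MY_APP_TIMEOUT"; the value is None if the variable is unset.
var_name = _generate_var_name("my_app", "timeout")
value = os.environ.get(var_name)
print(var_name, "->", value)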
def VelocityPostProcessingChooser(transport):
"""
pick acceptable velocity postprocessing based on input
"""
tryNew = True
velocityPostProcessor = None
if transport.conservativeFlux is not None:
if (transport.mesh.parallelPartitioningType == 0 and transport.mesh.nLayersOfOverlap==0): #element-based partition
logEvent("Cannot specify conservative flux if partitioned by element with no element overlaps")
exit()
ppcomps = []
pptypes = {}
for ci in list(transport.conservativeFlux.keys()):
if (transport.conservativeFlux[ci] == 'p1-nc' and
isinstance(transport.u[ci].femSpace,FemTools.NC_AffineLinearOnSimplexWithNodalBasis)):
ppcomps.append(ci)
pptypes[ci] = 'p1-nc'
#end p1-nc for comp ci
elif 'pwl' in transport.conservativeFlux[ci]:
ppcomps.append(ci)
pptypes[ci] = transport.conservativeFlux[ci]
elif transport.conservativeFlux[ci] in ['point-eval','dg-point-eval','point-eval-gwvd']: #tjp addin for gwvd
ppcomps.append(ci)
pptypes[ci] = transport.conservativeFlux[ci]
elif transport.conservativeFlux[ci] == 'pwc':
ppcomps.append(ci)
pptypes[ci] = 'pwc'
elif 'sun-' in transport.conservativeFlux[ci]:
ppcomps.append(ci)
pptypes[ci] = transport.conservativeFlux[ci]
elif transport.conservativeFlux[ci] in ['dg','dg-bdm']:
ppcomps.append(ci)
pptypes[ci] = transport.conservativeFlux[ci]
else:
logEvent("Unrecognized conservative flux", transport.conservativeFlux[ci])
#for ci
if tryNew:
velocityPostProcessor = AggregateVelocityPostProcessor(pptypes,transport)
else:
velocityPostProcessor = VelocityPostProcessor_Original(pptypes,
transport,
ppcomps)
#conservative flux specified
return velocityPostProcessor | 44484d2b1f35ac865d9b5a1d53a62f4234bea4ee | 9,204 |
import networkx as nx
def get_node_hierarchical_structure(graph: nx.Graph, node: str, hop: int):
"""
explore hierarchical neighborhoods of node
"""
layers = [[node]]
curLayer = {node}
visited = {node}
for _ in range(hop):
if len(curLayer) == 0:
break
nextLayer = set()
for neighbor in curLayer:
for next_hop_neighbor in nx.neighbors(graph, neighbor):
if next_hop_neighbor not in visited:
nextLayer.add(next_hop_neighbor)
visited.add(next_hop_neighbor)
curLayer = nextLayer
layers.append(list(nextLayer))
return layers | 132db2de60459ea41a142ae17e5ad08fb325692c | 9,205 |
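A minimal sketch of the function on a toy graph (assumes networkx is available):

import networkx as nx

g = nx.Graph()
g.add_edges_from([("a", "b"), ("a", "c"), ("b", "d"), ("c", "e")])
layers = get_node_hierarchical_structure(g, "a", 2)
print(layers)  # [['a'], ['b', 'c'], ['d', 'e']] (order within each layer may vary)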
def svn_utf_cstring_from_utf8_string(*args):
"""svn_utf_cstring_from_utf8_string(svn_string_t src, apr_pool_t pool) -> svn_error_t"""
return _core.svn_utf_cstring_from_utf8_string(*args) | 0651604821f3164f6b4847397a7a484bd0a51568 | 9,206 |
import numpy as np
def fitness_sum(element):
"""
Test fitness function.
"""
return np.sum(element) | c2a6881864e4a31ed0ffe18c276573a7bdd6a867 | 9,207 |
def Stepk(k, basetree=[]): # XXX. make sure basetree is passed as expected.
"""Try to solve the puzzle using assumptions.
k --> The step number. (1st step is solving exactly,
2nd step is solving using 1 assumption,
3rd step is solving using 2 assumptions and so on.)
Note: The assumption level of this step will be k-1.
basetree --> list of parent assumption levels.
It helps in getting the tree structure of (nested)
assumptions.
Example- basetree = [3,2] --> This means that this Stepk function has been
called (recursively) from another Stepk function (with k = 3) which was
itself called from another Stepk function (with k = 4).
==============
Return value:
==============
1 - puzzle was solved in this step.
0 - puzzle was not solved in this step.
"""
# Note: If the puzzle being solved does not have a unique solution and
# the parameter k is large (say 5 or more) then this function will give
# one of the many possible solutions.
# But whichever solution it gives, it will be definitely correct!
print "Puzzle complete?"
if isPuzzleComplete():
print "> Complete!"
return 1
else:
print "> Not yet!"
assumptionleveltree = basetree + [k - 1]
print "\n(New Assumption Level.\nAssumption Tree: %s\n" \
"Saving puzzle...)\n" % assumptionleveltree
initialpuzzle, initiallabelrestrictionscount = SavePuzzle()
for row in xrange(9):
for col in xrange(9):
# substitute for sudokucellswithonly2possibilities
if (not (IsCellEmpty(row, col) and
(lenLabelsPermissible(row, col) == 3))):
continue # == 3 because the 1st entry is a ''
_labels = GetPermissibleLabels(row, col, 2)
for i in (0, 1): # iterate through the permissible labels.
# XXX. improve this
if i == 0:
otherlabel = _labels[1]
else:
otherlabel = _labels[0]
print "Assuming %s in cell (%d,%d)\n[Other can be %s]\n" \
% (_labels[i], row + 1, col + 1, otherlabel)
setSudokuCellLabel(row, col, _labels[i])
if k != 2:
print "(Entering into nested\nassumption...)\n"
SolveUptoSteps(k - 1, assumptionleveltree)
if k != 2:
print "(Exiting from nested\nassumption...)\n"
print "Puzzle complete?"
if isPuzzleComplete():
# This means that the assumption taken above was
# correct and the puzzle got solved. Hence, return 1.
print "> Complete!" \
# add this later.. (Assumption Level Tree: %s)
return 1
else:
print "> Not yet!\n\nAssumption correct?"
if isPuzzleCorrect():
# This means that the puzzle is incompletely filled
# and it cannot be decided from this point whether
# the assumption taken above is correct or
# incorrect.
print "Maybe. Can't say anything\nas of now."\
" Assumption was\n%s in (%d,%d)\n" \
% (_labels[i], row + 1, col + 1)
# caching
if i == 0:
# This is caching, for speeding up the solve
# process. If 'label' is the 1st of the 2
# permissible labels then save the solution, it
# might be possible that the 2nd of the 2
# permissible options is definitely incorrect,
# (and consequently this assumption is correct)
# so we will need this solution!
# (better to save it, rather than finding it
# again later.)
print "Saving the above puzzle.\n" \
"Will be useful if other\n" \
"assumption (on same cell)\n"\
"is definitely incorrect.\n"
temppuzzle, templabelrestrictionscount = \
SavePuzzle()
# As it cannot be decided standing at this point
# whether the above assumption is correct or
# incorrect, revert to initial conditions and try
# the other options!
print "Reverting to this puzzle\n"\
"(saved at the beginning \n"\
"of this assumption) -"
LoadPuzzle(initialpuzzle,
initiallabelrestrictionscount)
PrintPuzzle()
else:
# This means that puzzle is incorrectly filled, so
# it is certain that the above assumption is definitely
# incorrect, so the other of the 2 permissible
# labels is definitely correct.
print "Definitely incorrect!\n" \
"[%s in cell (%d,%d)]\n" \
% (_labels[i], row + 1, col + 1)
# decide whether the label is the 1st of the permissible
# labels or the 2nd one.
if i == 1:
# This means that the assumption we took
# (2nd of the 2 permissible labels) is
# incorrect, & as this assumption is incorrect,
# the 1st of the 2 assumptions is definitely
# correct. Moreover, the puzzle solution to
# the 1st permissible label is already saved in
# temppuzzle, so just load it.
print "Hence previous assumption\n" \
"was correct - \n" \
"[%s in cell (%d,%d)]\n" \
"Revert to the its\n" \
"solution puzzle. \n" \
"(Good, I had saved it!\n" \
"Saved my time!)" \
% (otherlabel, row + 1, col + 1)
PrintPuzzle()
LoadPuzzle(temppuzzle,
templabelrestrictionscount)
else:
print "Hence, defintely correct-\n" \
"[%s in cell (%d,%d)]\n" \
% (otherlabel, row + 1, col + 1)
# This means that 2nd of the 2 permissible
# labels is correct, so revert to the puzzle
# that was at the beginning of the outermost
# for loop and then set the 2nd of the
# 2 permissible labels.
LoadPuzzle(initialpuzzle,
initiallabelrestrictionscount)
setSudokuCellLabel(row, col, _labels[1])
# Delete all the variables defined at this point,
# as this function will be going into a recursive
# loop from here on, and this data, unnecessarily,
# will form a stack.
del initialpuzzle
del initiallabelrestrictionscount
del row
del col
del _labels
del i
del otherlabel
# Now, the puzzle solution has moved one step
# ahead, so try to solve it further using the
# "less complex", "previous" steps.
if k != 2:
print "(Entering into nested\nassumption...)\n"
SolveUptoSteps(k - 1, assumptionleveltree)
if k != 2:
print "(Exiting from nested\nassumption...)\n"
# Finally, repeat this step again to solve the
# puzzle further. (it is quite possile that in the
# previous step itself, the puzzle might have got
# solved. If so, it will just enter this function
# (in recursion) and return from the very
# 1st check)
return(Stepk(k, basetree))
# If this part is getting executed means this function did not help
# in solving the puzzle any further.
print "Didn't get anything from\nthis Assumption Level.\n" \
"Assumption Tree: %s\n" % assumptionleveltree
return 0 | cbf28b995deee1ff3432c46d3e48cf9b0c8fd31a | 9,208 |
def load_handler(path, *args, **kwargs):
"""
Given a path to a handler, return an instance of that handler.
E.g.::
>>> load_handler('anthill.framework.core.files.uploadhandler.TemporaryFileUploadHandler', request)
<TemporaryFileUploadHandler object at 0x...>
"""
return import_string(path)(*args, **kwargs) | 26a9b3ebaa0ab2362a9a1ab977281c25334e0d9c | 9,209 |
from flask import current_app, request, url_for
from twilio.request_validator import RequestValidator
def validate_twilio_request():
"""Ensure a request is coming from Twilio by checking the signature."""
validator = RequestValidator(current_app.config['TWILIO_AUTH_TOKEN'])
if 'X-Twilio-Signature' not in request.headers:
return False
signature = request.headers['X-Twilio-Signature']
if 'SmsSid' in request.form:
url = url_for('check_raffle', _external=True)
else:
return False
return validator.validate(url, request.form, signature.encode('UTF-8')) | bb35e83223ac8530a6da8fed581ba5cbc8afe47e | 9,210 |
def paper_selection(text=[], keywords=[]):
"""
This function calculates the similarity between keywords or phrases and a text, so it is possible to compare
several texts and keyword sets at once to see which text best matches particular keywords. A plot is also
generated, where it is possible to see the scores of all papers and keywords
:param text: This is a list of texts which you want to compare with the keywords
:param keywords: The keywords in this list are used to compare the single texts.
:return:
"""
df = PaperSelection.paper_importance(text, keywords)
fig = PaperSelection.plot_paper_selection(df)
return df, fig | ac6d16ac183f081ef193bf43e782019c38c04106 | 9,211 |
import numpy as np
def _get_out_of_bounds_window(radius, padding_value):
"""Return a window full of padding_value."""
return padding_value * np.ones((2 * radius + 1, 2 * radius + 1), dtype=int) | 0dacf7d63f5e0be21deb92f02fe3b76bd201b5ec | 9,213 |
import logging
from uk_covid19 import Cov19API
covid_data = {}  # module-level store updated by covid_API_request
def covid_API_request(
location: str = "Exeter",
location_type: str = "ltla") -> dict:
"""Requests current COVID data from the Cov19API for a given area.
Uses the Cov19API to request the most recent COVID data for
a given area. Returns data as a list of comma separated strings.
Args:
location: The requested COVID data location.
location_type: The type of area requested ("nation" or "ltla").
Returns:
A dictionary containing a csv file containing COVID information
for an area, indexed by the area's name.
"""
requested_area = ["areaType="+location_type, "areaName="+location]
requested_data = {
"areaCode": "areaCode",
"areaName": "areaName",
"areaType": "areaType",
"date": "date",
"cumDailyNsoDeathsByDeathDate": "cumDailyNsoDeathsByDeathDate",
"hospitalCases": "hospitalCases",
"newCasesBySpecimenDate": "newCasesBySpecimenDate"
}
logging.info("Requesting COVID data for %s...", location)
api = Cov19API(filters=requested_area, structure=requested_data)
data = api.get_csv()
covid_data[location] = data.split("\n")[:-1]
logging.info("COVID data for %s updated.", location)
return covid_data | 5b931e3d30f51ff64fc206cf5d30f7fd925d2b78 | 9,215 |
import tensorflow as tf
def resize(img, height, width, is_flow, mask=None):
"""Resize an image or flow field to a new resolution.
In case a mask (per pixel {0,1} flag) is passed a weighted resizing is
performed to account for missing flow entries in the sparse flow field. The
weighting is based on the resized mask, which determines the 'amount of valid
flow vectors' that contributed to each individual resized flow vector. Hence,
multiplying by the reciprocal cancels out the effect of considering non valid
flow vectors.
Args:
img: tf.tensor, image or flow field to be resized of shape [b, h, w, c]
height: int, height of new resolution
width: int, width of new resolution
is_flow: bool, flag for scaling flow accordingly
mask: tf.tensor, mask (optional) per pixel {0,1} flag
Returns:
Resized and potentially scaled image or flow field (and mask).
"""
def _resize(image, mask=None):
# _, orig_height, orig_width, _ = img.shape.as_list()
orig_height = tf.shape(input=image)[1]
orig_width = tf.shape(input=image)[2]
if mask is not None:
# multiply with mask, to ensure non-valid locations are zero
image = tf.math.multiply(image, mask)
# resize image
img_resized = tf.compat.v2.image.resize(
image, (int(height), int(width)), antialias=True)
# resize mask (will serve as normalization weights)
mask_resized = tf.compat.v2.image.resize(
mask, (int(height), int(width)), antialias=True)
# normalize sparse flow field and mask
img_resized = tf.math.multiply(
img_resized, tf.math.reciprocal_no_nan(mask_resized))
mask_resized = tf.math.multiply(
mask_resized, tf.math.reciprocal_no_nan(mask_resized))
else:
# normal resize without anti-aliasing
img_resized = tf.compat.v2.image.resize(image, (tf.cast(height,
tf.int32),
tf.cast(width,
tf.int32)))
if is_flow:
# If image is a flow image, scale flow values to be consistent with the
# new image size.
scaling = tf.reshape([
float(height) / tf.cast(orig_height, tf.float32),
float(width) / tf.cast(orig_width, tf.float32)
], [1, 1, 1, 2])
img_resized *= scaling
if mask is not None:
return img_resized, mask_resized
return img_resized
# Apply resizing at the right shape.
shape = img.shape.as_list()
if img.shape.rank == 3:
if mask is not None:
img_resized, mask_resized = _resize(img[None], mask[None])
return img_resized[0], mask_resized[0]
else:
return _resize(img[None])[0]
if img.shape.rank == 4:
# Input at the right shape.
return _resize(img, mask)
if img.shape.rank > 4:
# Reshape input to [b, h, w, c], resize and reshape back.
outer_shape = tf.shape(input=img)[:-3]
required_shape = tf.concat([[-1], tf.shape(input=img)[-3:]], axis=0)
img_flattened = tf.reshape(img, required_shape)
if mask is not None:
mask_flattened = tf.reshape(mask, required_shape)
img_resized, mask_resized = _resize(img_flattened, mask_flattened)
else:
img_resized = _resize(img_flattened)
final_shape = tf.concat(
[outer_shape, tf.shape(input=img_resized)[-3:]], axis=0)
result_img = tf.reshape(img_resized, final_shape)
if mask is not None:
final_mask_shape = tf.concat(
[outer_shape, tf.shape(input=mask_resized)[-3:]], axis=0)
result_mask = tf.reshape(mask_resized, final_mask_shape)
return result_img, result_mask
return result_img
else:
raise ValueError('Cannot resize an image of shape', shape) | 9d0543a88382028522ae469fc773dcebc006b5c3 | 9,216 |
def num_decodings2(enc_mes):
"""
:type enc_mes: str
:rtype: int
"""
if not enc_mes or enc_mes.startswith('0'):
return 0
stack = [1, 1]
for i in range(1, len(enc_mes)):
if enc_mes[i] == '0':
if enc_mes[i-1] == '0' or enc_mes[i-1] > '2':
# only '10', '20' is valid
return 0
stack.append(stack[-2])
elif 9 < int(enc_mes[i-1:i+1]) < 27:
# '01 - 09' is not allowed
stack.append(stack[-2]+stack[-1])
else:
# other case '01, 09, 27'
stack.append(stack[-1])
return stack[-1] | ae4ff7181e34003dcc7ec264ed2727bc716708a5 | 9,217 |
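A quick worked check of the DP above:

# "226" decodes as (2,2,6), (22,6) and (2,26); leading zeros are rejected.
print(num_decodings2("226"))  # 3
print(num_decodings2("10"))   # 1
print(num_decodings2("06"))   # 0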
def spot2Cmyk(spot, default=None):
"""Answers the CMYK value of spot color. If the value does not exist,
answer default of black. Note that this is a double conversion:
spot-->rgb-->cmyk
>>> '%0.2f, %0.2f, %0.2f, %0.2f' % spot2Cmyk(300)
'0.78, 0.33, 0.00, 0.22'
>>> # Nonexistent spot colors map to default or black.
>>> spot2Cmyk(10000000)
(0, 0, 0, 1)
"""
return rgb2Cmyk(spot2Rgb(spot, default=default)) | 307c8e934cdac2f5fb857e8f8f122c9862adab6d | 9,218 |
import re
def clean(text):
"""
Removes irrelevant parts from :param: text.
"""
# Collect spans
spans = []
# Drop HTML comments
for m in comment.finditer(text):
spans.append((m.start(), m.end()))
# Drop self-closing tags
for pattern in selfClosing_tag_patterns:
for m in pattern.finditer(text):
spans.append((m.start(), m.end()))
# Drop ignored tags
# for left, right in options.ignored_tag_patterns:
# for m in left.finditer(text):
# spans.append((m.start(), m.end()))
# for m in right.finditer(text):
# spans.append((m.start(), m.end()))
# Bulk remove all spans
text = dropSpans(spans, text)
# Drop discarded elements
# for tag in options.discardElements:
# text = dropNested(text, r'<\s*%s\b[^>/]*>' % tag, r'<\s*/\s*%s>' % tag)
# Expand placeholders
for pattern, placeholder in placeholder_tag_patterns:
index = 1
for match in pattern.finditer(text):
text = text.replace(match.group(), '%s_%d' % (placeholder, index))
index += 1
text = text.replace('<<', '«').replace('>>', '»')
#############################################
# Cleanup text
text = text.replace('\t', ' ')
text = spaces.sub(' ', text)
text = dots.sub('...', text)
text = re.sub(' (,:\.\)\]»)', r'\1', text)
text = re.sub('\([^a-zA-Z\d]*\)', '', text)
text = re.sub('(\[\(«) ', r'\1', text)
text = re.sub(r'\n\W+?\n', '\n', text, flags=re.U) # lines with only punctuations
text = text.replace(',,', ',').replace(',.', '.')
text = text.replace(' , ', ', ')
if keep_tables:
# the following regular expressions are used to remove the wikiml characters around table structures
# yet keep the content. The order here is important so we remove certain markup like {| and then
# the html attributes such as 'style'. Finally we drop the remaining '|-' that delimits cells.
text = re.sub(r'!(?:\s)?style=\"[a-z]+:(?:\d+)%;\"', r'', text)
text = re.sub(r'!(?:\s)?style="[a-z]+:(?:\d+)%;[a-z]+:(?:#)?(?:[0-9a-z]+)?"', r'', text)
text = text.replace('|-', '')
text = text.replace('|', '')
text = text.replace('(; ', '(')
text = text.strip()
return text | 0e942e36035d2129ca0be814268e7c6e4552435e | 9,219 |
from dateutil import tz
from datetime import datetime
import numpy
def get_offset(t0,t1,zone,station,gps):
"""
Determine UTC to local Local offset to be applied.
Parameters
----------
t0 : datetime
Starting timestamp
t1 : datetime
End timestamp
zone : str
Define timing zone, either Local or UTC
station : int
Station number identifying where the sensor is located
gps : str
Whether GPS timing is 'on' or 'off'
Return
------
utc2local : float
Offset in seconds between UTC and local time
offset : float
Offset time to match time in targeted filename
utc2pst : float
Offset in seconds between UTC and PST (zero after the fix date)
"""
# Identifying the time zone
utc_zone = tz.gettz('UTC')
# Format input timestamp into UTC time
utc_epoch = t0.replace(tzinfo=utc_zone)
# Get time in local California time
local_epoch = utc_epoch.astimezone(tz.gettz('America/Los_Angeles'))
# Calculate offset between UTC and PST timestamps
utc2pst = datetime.utcoffset(local_epoch).total_seconds()
# Consider UTC to PST offset if requested time is before fix date
utc2pst = utc2pst if t0<datetime(2017,12,7) else 0
# Look-up table to identify station's location over time
locations = numpy.array([[1,datetime(2015,11,1),datetime(2017,12,3),tz.gettz('America/Los_Angeles')],
[1,datetime(2017,12,3),datetime.max ,tz.gettz('America/New_York') ],
[2,datetime(2015,11,1),datetime.max ,tz.gettz('America/Los_Angeles')],
[3,datetime(2015,11,1),datetime(2017,10,6),tz.gettz('America/Los_Angeles')],
[3,datetime(2017,10,6),datetime.max ,tz.gettz('America/New_York') ],
[4,datetime(2015,11,1),datetime(2017,12,3),tz.gettz('America/Los_Angeles')],
[4,datetime(2017,12,3),datetime.max ,tz.gettz('America/New_York') ]])
# Identify the location for requested data
for n,start,end,loc in locations:
if n==station and start<t0<end:
local_zone = loc
# Identifying the time zone
utc_zone = tz.gettz('UTC')
# Format input timestamp into UTC time
utc_epoch = t0.replace(tzinfo=utc_zone)
# Get time in local California time
local_epoch = utc_epoch.astimezone(local_zone)
# Calculate offset between Local and UTC timestamps
utc2local = datetime.utcoffset(local_epoch).total_seconds()
# Check if first version of timing data
if t1<datetime(2016,6,10):
# Calculate offset between provided UTC to local timestamps
offset = -utc2local if zone=='UTC' else 0
# Check if second version of timing data
if t0>datetime(2016,6,10):
# Calculate offset between provided local to UTC timestamps
offset = -utc2local if zone=='Local' and gps=='on' else 0
return utc2local,offset,utc2pst | f6ed5f50528a67735097abf17d3039008a61b547 | 9,220 |
def my_render_template(html, **arguments):
"""Call render_template with comparison_types as one of the arguments.
:param string html: name of the template
:param **arguments: other arguments to be passed while rendering template
"""
arguments.setdefault(
'comparison_types', ComparisonType.get_cache(g.db_session)
)
return render_template(html, **arguments) | 0a639a9dd8cef8c0cc659444d32138acf9a43e41 | 9,221 |
import boto3
def find_or_create_qualification(qualification_name, description,
must_be_owned=True):
"""Query amazon to find the existing qualification name, return the Id. If
it exists and must_be_owned is true but we don't own it, this prints an
error and returns none. If it doesn't exist, the qualification is created
"""
qual_id = find_qualification(
qualification_name,
must_be_owned=must_be_owned
)
if qual_id is False:
return None
if qual_id is not None:
return qual_id
# Create the qualification, as it doesn't exist yet
client = boto3.client(
service_name='mturk',
region_name='us-east-1',
endpoint_url='https://mturk-requester-sandbox.us-east-1.amazonaws.com'
)
response = client.create_qualification_type(
Name=qualification_name,
Description=description,
QualificationTypeStatus='Active',
)
return response['QualificationType']['QualificationTypeId'] | 92855fbaee2c1f5d190b2c4cd67078b07c6f4e51 | 9,222 |
import torch
def hard_to_soft(Y_h, k):
"""Converts a 1D tensor of hard labels into a 2D tensor of soft labels
Source: MeTaL from HazyResearch, https://github.com/HazyResearch/metal/blob/master/metal/utils.py
Args:
Y_h: an [n], or [n,1] tensor of hard (int) labels in {0,...,k-1}
k: the number of possible labels (labels range over {0,...,k-1})
Returns:
Y_s: a tensor of shape [n, k] (same dtype as Y_h) where Y_s[i, j] is the soft
label for item i and label j
"""
Y_h = Y_h.clone()
if Y_h.dim() > 1:
Y_h = Y_h.squeeze()
assert Y_h.dim() == 1
assert (Y_h >= 0).all()
assert (Y_h < k).all()
n = Y_h.shape[0]
Y_s = torch.zeros((n, k), dtype=Y_h.dtype, device=Y_h.device)
for i, j in enumerate(Y_h):
Y_s[i, int(j)] = 1.0
return Y_s | d31c6749569e19cbbdd91c59e66982497190163d | 9,223 |
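A small sketch of the conversion (assumes PyTorch is installed); note that the output keeps the dtype of Y_h:

import torch

y_hard = torch.tensor([0, 2, 1])
y_soft = hard_to_soft(y_hard, k=3)
print(y_soft)
# tensor([[1, 0, 0],
#         [0, 0, 1],
#         [0, 1, 0]])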
import decimal
def truncate_decimal_places(value: decimal.Decimal, places: int = 1) -> float:
"""
Truncate a float (i.e round towards zero) to a given number of decimal places.
NB: Takes a decimal but returns a float!
>>> truncate_decimal_places(12.364, 1)
12.3
>>> truncate_decimal_places(-12.364, 1)
-12.3 # -12.3 is bigger than -12.4
>>> truncate_decimal_places(12.364, 0)
12.0 # truncating to 0 places returns a float with no decimal part
"""
if places == 0:
quantize_string = "1"
else:
quantize_string = "0." + ((places - 1) * "0") + "1"
exponent = decimal.Decimal(quantize_string)
decimal_result = value.quantize(exponent, rounding=decimal.ROUND_DOWN)
return float(decimal_result) | 11b924a5e4f6560674b1f7378f6a4001a3265a97 | 9,226 |
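A short usage sketch; note the decimal.Decimal input (a plain float has no .quantize()):

import decimal

print(truncate_decimal_places(decimal.Decimal("12.364"), 1))   # 12.3
print(truncate_decimal_places(decimal.Decimal("-12.364"), 1))  # -12.3
print(truncate_decimal_places(decimal.Decimal("12.364"), 0))   # 12.0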
from urllib.parse import urljoin
def site_url(self, url):
"""
Return the fully qualified URL for the given URL fragment.
"""
try:
# In Django < 1.9, `live_server_url` is decorated as a `property`, but
# we need to access it on the class.
base_url = self.testclass.live_server_url.__get__(self.testclass)
except AttributeError:
# Django 1.9 updates `live_server_url` to be a `classproperty`.
base_url = self.testclass.live_server_url
return urljoin(base_url, url) | 4f82cc766d0144fb11e897e7c9ceba57a6881f23 | 9,227 |
from math import floor
def myisinteger(
num: int) -> bool:
"""
Checks if num is an integer (i.e. equals its floor)
"""
return num == floor(num)
import pandas as pd
def _get_timeunit(min_time: pd.Timestamp, max_time: pd.Timestamp, dflt: int) -> str:
"""Auxiliary function to find an appropriate time unit. Will find the
time unit such that the number of time units are closest to dflt."""
dt_secs = {
"year": 60 * 60 * 24 * 365,
"quarter": 60 * 60 * 24 * 91,
"month": 60 * 60 * 24 * 30,
"week": 60 * 60 * 24 * 7,
"day": 60 * 60 * 24,
"hour": 60 * 60,
"minute": 60,
"second": 1,
}
time_rng_secs = (max_time - min_time).total_seconds()
prev_bin_cnt, prev_unit = 0, "year"
for unit, secs_in_unit in dt_secs.items():
cur_bin_cnt = time_rng_secs / secs_in_unit
if abs(prev_bin_cnt - dflt) < abs(cur_bin_cnt - dflt):
return prev_unit
prev_bin_cnt = cur_bin_cnt
prev_unit = unit
return prev_unit | 96b1a036bdb64b9c684ed8ac9123cf788ddc189d | 9,229 |
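A quick sketch: for a roughly 90-day span and dflt=100, "day" gives about 90 bins, the closest unit count to 100.

import pandas as pd

t0 = pd.Timestamp("2021-01-01")
t1 = pd.Timestamp("2021-04-01")
print(_get_timeunit(t0, t1, 100))  # "day"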
import numpy as np
def data_sample(df, x, y, group_number, quantile):
"""
Grouped point-selection method
x: grouping variable
y: value variable
"""
group_width = (np.max(df[x]) - np.min(df[x])) / group_number # group width
x_group = np.arange(np.min(df[x]), np.max(df[x]), group_width) # left edges of the groups along x
# Pick the points at the configured quantiles within each group; only groups containing points are used
if len(quantile) == 3:
data_x = np.array([])
data_y = np.array([])
for i in x_group:
if len(df[(df[x] >= i) & (df[x] < i + group_width)]) > 0:
temp_y = np.array(df[(df[x] >= i) & (df[x] < i + group_width)][y].quantile(quantile))
temp_x = np.array([(i + group_width / 4), (i + group_width / 2), (i + 3 * group_width / 4)])
data_x = np.concatenate([data_x, temp_x], axis = 0)
data_y = np.concatenate([data_y, temp_y], axis = 0)
elif len(quantile) == 1:
data_x = []
data_y = []
for i in x_group:
if len(df[(df[x] >= i) & (df[x] < i + group_width)]) > 0:
temp_y = float(df[(df[x] >= i) & (df[x] < i + group_width)][y].quantile(quantile))
temp_x = float(i + group_width / 2)
data_x.append(temp_x)
data_y.append(temp_y)
return data_x, data_y | 9be1ec948f9d427f7b6136b0c4f6bf5622be5843 | 9,231 |
def index(request):
""" Shows all challenges related to the current user """
profile = request.user.get_profile()
chall_user = profile.get_extension(ChallengeUser)
challs = ChallengeGame.get_active(chall_user)
played = ChallengeGame.get_played(chall_user)[:10]
if not chall_user.is_eligible():
messages.error(request, _('Your race can\'t play. Go home'))
return render_to_response('challenge/index.html',
{'challenges': challs, 'played': played, 'challuser': chall_user, 'challenge': ChallengeGame},
context_instance=RequestContext(request)) | b649f74777eedd1093e884f71949cd43c7a215ad | 9,232 |
def hansen(threshold, geojson, begin, end, logger):
"""For a given threshold and geometry return a dictionary of ha area.
The threshold is used to identify which band of loss and tree to select.
asset_id should be 'projects/wri-datalab/HansenComposite_14-15'
Methods used to identify data:
Gain band is a binary (0 = 0, 255=1) of locations where tree cover increased
over the data collection period. Calculate area of gain, by converting 255 values
to 1, and then using a trick to convert this to pixel area
(1 * pixelArea()). Finally, we sum the areas over a given polygon using a
reducer, and convert from square meters to hectares.
Tree_X bands show percentage canopy cover of forest, If missing, no trees
present. Therefore, to count the tree area of a given canopy cover, select
the band, convert it to binary (0=no tree cover, 1 = tree cover), and
identify pixel area via a trick, multiplying all 1 vals by image.pixelArea.
Then, sum the values over a region. Finally, divide the result (meters
squared) by 10,000 to convert to hectares
"""
asset_id = 'projects/wri-datalab/HansenComposite_14-15'
d = {}
begin = int(begin.split('-')[0][2:])
end = int(end.split('-')[0][2:])
region = get_region(geojson)
reduce_args = {'reducer': ee.Reducer.sum().unweighted(),
'geometry': region,
'bestEffort': True,
'scale': 90}
gfw_data = ee.Image(asset_id)
loss_band = 'loss_{0}'.format(threshold)
cover_band = 'tree_{0}'.format(threshold)
# Identify 2000 forest cover at given threshold
tree_area = gfw_data.select(cover_band).gt(0).multiply(
ee.Image.pixelArea()).reduceRegion(**reduce_args).getInfo()
d['tree-extent'] = squaremeters_to_ha(tree_area[cover_band])
# Identify tree gain over data collection period
gain = gfw_data.select('gain').divide(255.0).multiply(
ee.Image.pixelArea()).reduceRegion(**reduce_args).getInfo()
d['gain'] = squaremeters_to_ha(gain['gain'])
# Identify area lost from begin year up until end year
tmp_img = gfw_data.select(loss_band)
loss_area_img = tmp_img.gte(begin).And(tmp_img.lte(end)).multiply(ee.Image.pixelArea())
loss_total = loss_area_img.reduceRegion(**reduce_args).getInfo()
d['loss'] = squaremeters_to_ha(loss_total[loss_band])
return d | f7d43c8a0d5c8869232d53b2f625c2568de3a1b0 | 9,233 |
import numpy as np
import cv2
def rectangluarMask(image):
"""
this function takes an image as input and creates a rectangular mask (image-sized) centered on the canvas
"""
mask = np.zeros(image.shape[:2], dtype = 'uint8')
(cX, cY) = (image.shape[1]//2, image.shape[0]//2)
cv2.rectangle(mask, (cX-75, cY-75), (cX+75, cY+75), 255, -1)
# cv2.imshow('Rectangle Mask', mask)
# cv2.waitKey(0)
return mask | df5ae1e31eb259bc02ff75282d2dea2b4a7f547b | 9,234 |
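A hedged usage sketch (the input path is hypothetical; assumes OpenCV and NumPy are installed): the mask keeps only the central 150x150 region of the image.

import cv2

image = cv2.imread("photo.jpg")  # hypothetical input file
mask = rectangluarMask(image)
masked = cv2.bitwise_and(image, image, mask=mask)
cv2.imwrite("masked.jpg", masked)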
from datetime import date
def get_artist_listen_for_change_streams(artist: Artist=None):
"""
Computation steps:
1. Define start and end dates
2. Create stream filters for the current artist
3. aggregate the streams from the Model
4. Return just the number (maybe a dict idk)
"""
# Validate argument data types
if not isinstance(artist, Artist): raise TypeError("Param 'artist' must be an Artist object")
# 1
start_date = date(year=2020, month=6, day=22)
end_date = date(year=2020, month=6, day=28)
# 2
stream_song_filter = Q(song__uploaded_by=artist)
stream_time_filter = Q(timestamp__gte=start_date, timestamp__lte=end_date)
# 3
streams = Stream.objects.filter(stream_song_filter, stream_time_filter)
stream_count = streams.aggregate(num_streams=Count('id'))
return stream_count | c0620809e7ebf10138e3c6c93520787c30efa4f9 | 9,235 |
def flip_dict(d):
"""Returns a dict with values and keys reversed.
Args:
d: The dict to flip the values and keys of.
Returns:
A dict whose keys are the values of the original dict, and whose values
are the corresponding keys.
"""
return {v: k for k, v in d.items()} | c9c960209663639613739979c0dc4066a63c44cb | 9,236 |
def has_sample(args):
"""Returns if some kind of sample id is given in args.
"""
return args.sample or args.samples or args.sample_tag | c2ae87acb11232d7f56cb9e09eb8509720669058 | 9,238 |
from typing import Callable
from typing import Any
from typing import Type
import inspect
from collections import namedtuple
def make_key_type(func: Callable[..., Any]) -> Type[CallKey]:
"""Construct a type representing a functions signature."""
sig = inspect.signature(func)
# make a format string that unpacks and names the parameters nicely
repr_fmt = (
(
func.__name__
if "<locals>" in func.__qualname__
else func.__module__ + "." + func.__qualname__
)
+ "("
+ ", ".join(name + "={!r}" for name in sig.parameters.keys())
+ ")"
)
# patch the repr so it looked pretty
def _repr(self: Any) -> str:
return repr_fmt.format(*self[:-1])
key_type = type(
func.__name__,
(
namedtuple(
func.__name__,
tuple(sig.parameters.keys()) + ("func__",),
defaults=tuple(p.default for p in sig.parameters.values()) + (func,),
module=func.__module__,
),
CallKey,
),
{
"__repr__": _repr,
"__func__": func,
"__module__": func.__module__,
"__signature__": sig,
"from_call": classmethod(_from_call),
},
)
return key_type | 9f6ab0a5ac20fcc69518f24669035a6b7c6246b6 | 9,240 |
def _get_transmission(self,d,E='config'):
""" calculate the transmittion after thickness d (in m) of material at energy E (in eV)."""
return np.exp(-d*1e6/self.absorption_length(E)) | ac11a97e424390e40544f16b7259bbf9ace30dcb | 9,243 |
def calculate_density(
input_layer,
field=None,
cell_size=None,
cell_size_units="Meters",
radius=None,
radius_units=None,
bounding_polygon_layer=None,
area_units=None,
classification_type="EqualInterval",
num_classes=10,
output_name=None,
context=None,
gis=None,
estimate=False,
future=False):
"""
.. image:: _static/images/cal_density_standard/calculate_density.png
The calculate_density function creates a density map from point or line features by spreading known quantities of
some phenomenon (represented as attributes of the points or lines) across the map. The result is a layer of areas
classified from least dense to most dense.
For point input, each point should represent the location of some event or incident, and the result layer represents
a count of the incident per unit area. A higher density value in a new location means that there are more points near
that location. In many cases, the result layer can be interpreted as a risk surface for future events. For example,
if the input points represent locations of lightning strikes, the result layer can be interpreted as a risk surface
for future lightning strikes.
For line input, the line density surface represents the total amount of line that is near each location. The units of
the calculated density values are the length of line per unit area. For example, if the lines represent rivers, the
result layer will represent the total length of rivers that are within the search radius. This result can be used to
identify areas that are hospitable to grazing animals.
========================= =========================================================
**Argument** **Description**
------------------------- ---------------------------------------------------------
input_layer Required layer. The point or line features from which to calculate density. See :ref:`Feature Input<FeatureInput>`.
------------------------- ---------------------------------------------------------
field Optional string. A numeric field name specifying the number of incidents at each location. For example, if you have points that represent cities, you can use a field representing the population of the city as the count field, and the resulting population density layer will calculate larger population densities near cities with larger populations. If not specified, each location will be assumed to represent a single count.
------------------------- ---------------------------------------------------------
cell_size Optional float. This value is used to create a mesh of points where density values are calculated. The default is approximately 1/1000th of the smaller of the width and height of the analysis extent as defined in the context parameter. The smaller the value, the smoother the polygon boundaries will be. Conversely, with larger values, the polygon boundaries will be more coarse and jagged.
------------------------- ---------------------------------------------------------
cell_size_units Optional string. The units of the cell_size value.
Choice list: ['Miles', 'Feet', 'Kilometers', 'Meters']
------------------------- ---------------------------------------------------------
radius Optional float. A distance specifying how far to search to find point or line features when calculating density values.
------------------------- ---------------------------------------------------------
radius_units Optional string. The units of the radius parameter. If no distance is provided, a default will be calculated that is based on the locations of the input features and the values in the count field (if a count field is provided).
Choice list: ['Miles', 'Feet', 'Kilometers', 'Meters']
------------------------- ---------------------------------------------------------
bounding_polygon_layer Optional layer. A layer specifying the polygon(s) where you want densities to be calculated. For example, if you are interpolating densities of fish within a lake, you can use the boundary of the lake in this parameter and the output will only draw within the boundary of the lake. See :ref:`Feature Input<FeatureInput>`.
------------------------- ---------------------------------------------------------
area_units Optional string. The units of the calculated density values.
Choice list: ['areaUnits', 'SquareMiles']
------------------------- ---------------------------------------------------------
classification_type Optional string. Determines how density values will be classified into polygons.
Choice list: ['EqualInterval', 'GeometricInterval', 'NaturalBreaks', 'EqualArea', 'StandardDeviation']
* EqualInterval - Polygons are created such that the range of density values is equal for each area.
* GeometricInterval - Polygons are based on class intervals that have a geometric series. This method ensures that each class range has approximately the same number of values within each class and that the change between intervals is consistent.
* NaturalBreaks - Class intervals for polygons are based on natural groupings of the data. Class break values are identified that best group similar values and that maximize the differences between classes.
* EqualArea - Polygons are created such that the size of each area is equal. For example, if the result has more high density values than low density values, more polygons will be created for high densities.
* StandardDeviation - Polygons are created based upon the standard deviation of the predicted density values.
------------------------- ---------------------------------------------------------
num_classes Optional int. This value is used to divide the range of predicted values into distinct classes. The range of values in each class is determined by the classification_type parameter.
------------------------- ---------------------------------------------------------
output_name Optional string. Additional properties such as output feature service name.
------------------------- ---------------------------------------------------------
context Optional string. Additional settings such as processing extent and output spatial reference. For calculate_density, there are two settings.
#. Extent (extent)-a bounding box that defines the analysis area. Only those points in the input_layer that intersect the bounding box will be analyzed.
#. Output Spatial Reference (outSR) the output features will be projected into the output spatial reference.
------------------------- ---------------------------------------------------------
gis Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
------------------------- ---------------------------------------------------------
estimate Optional Boolean. Is true, the number of credits needed to run the operation will be returned as a float.
------------------------- ---------------------------------------------------------
future Optional boolean. If True, the result will be a GPJob object and results will be returned asynchronously.
========================= =========================================================
:returns: result_layer : feature layer Item if output_name is specified, else Feature Collection.
.. code-block:: python
USAGE EXAMPLE: To create a layer that shows density of collisions within 2 miles.
The density is classified based upon the standard deviation.
The range of density values is divided into 5 classes.
collision_density = calculate_density(input_layer=collisions,
radius=2,
radius_units='Miles',
bounding_polygon_layer=zoning_lyr,
area_units='SquareMiles',
classification_type='StandardDeviation',
num_classes=5,
output_name='density_of_incidents')
"""
gis = _arcgis.env.active_gis if gis is None else gis
return gis._tools.featureanalysis.calculate_density(
input_layer,
field,
cell_size,
cell_size_units,
radius,
radius_units,
bounding_polygon_layer,
area_units,
classification_type,
num_classes,
output_name,
context,
estimate=estimate, future=future) | 271d1d50cd362f8e660de4ac93cef8a6cb43d967 | 9,244 |
import sqlalchemy
def reverse(operation):
""" decorator that wraps the given operation's result in sqlalchemy.not_ """
def decorated(*args, **kwargs):
return sqlalchemy.not_(operation(*args, **kwargs))
return decorated | 3a77ed5e0db081bd67ccbc1c90731f46001288f2 | 9,245 |
def disable_static_generator(view_func):
"""Decorator which prevents caching the response from a view on disk
Flags the view with a ``disable_static_generator`` attribute so
staticgenerator won't ever save its response on the filesystem.
Example::
@disable_static_generator
def myview(request):
# ...
"""
# We could just do view_func.disable_static_generator = True, but
# decorators are nicer if they don't have side-effects, so we return a new
# function.
def wrapped_view(*args, **kwargs):
return view_func(*args, **kwargs)
wrapped_view.disable_static_generator = True
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view) | 5ad9dff33b1340d909467dcada90a43c1cc7618d | 9,246 |
import pandas as pd
from tqdm import tqdm
def create_lengths(text):
"""Create a data frame of the sentence lengths from a text"""
lengths = []
for sentence in tqdm(text):
lengths.append(len(sentence))
return pd.DataFrame(lengths, columns=['counts']) | 6a239563b19d1d2b2f72ae3d425e94f7b28a0d62 | 9,247 |
from base64 import b64encode
def basic_auth_string(key, value):
"""Returns basic auth string from key and value"""
key_pass = b":".join((_to_bytes(key), _to_bytes(value)))
token = b64encode(key_pass).decode()
return f"Basic {token}" | 3de47ff05251792d0f5e782af4d7c30d83dfd860 | 9,248 |
import functools
def measureit(_func=None, *, output: Output = None, number: int = 1):
"""
Measure the energy consumption of monitored devices during the execution of the decorated function (with multiple runs it measures the mean energy)
:param output: output instance that will receive the power consumption data
:param number: number of iterations in the loop, in case you need multiple runs or the code is too fast to be measured
"""
def decorator_measure_energy(func):
@functools.wraps(func)
def wrapper_measure(*args, **kwargs):
sensor = Measurement(func.__name__, output)
sensor.begin()
for i in range(number):
val = func(*args, **kwargs)
sensor.end()
sensor._results = sensor._results / number
sensor.export()
return val
return wrapper_measure
if _func is None:
# to ensure the working system when you call it with parameters or without parameters
return decorator_measure_energy
else:
return decorator_measure_energy(_func) | bb28c7423f5d2a479de052554f68d6c99494180d | 9,250 |
import numpy as np
import pandas as pd
def csv_template(n_types, n_type_covariates, initialize_coeffs=True):
"""Creates a template for the parameter specification.
Parameters
----------
n_types : int, optional
Number of types in the model. Default is one.
n_type_covariates : int, optional
Number of covariates to predict type probabilities. Can be two or three.
initialize_coeffs : bool, optional
Whether coefficients are initialized with values or not. Default is ``True``.
"""
template = _base_template()
if n_types > 1:
to_concat = [
template,
_type_prob_template(n_types, n_type_covariates),
_type_shift_template(n_types),
]
template = pd.concat(to_concat, axis=0, sort=False)
if initialize_coeffs is False:
template["value"] = np.nan
return template | d211373b1939242600b0c5c15a30b16f58eab229 | 9,251 |
from . import setup as jssetup
def setup(app):
"""A temporary setup function so that we can use it for
backwards compatibility.
This should be removed after a deprecation cycle.
"""
# To avoid circular imports we'll lazily import
js.logger.warning(
(
"`jupyter-sphinx` was initialized with the "
"`jupyter_sphinx.execute` sub-module. Replace this with "
"`jupyter_sphinx`. Initializing with "
"`jupyter_sphinx.execute` will be removed in "
"version 0.3"
)
)
out = jssetup(app)
return out | 16a64701d3b77a1d58126df458d4a3016be1e366 | 9,254 |
import torch
def hinge_loss(logit, target, margin, reduce='sum'):
"""
Args:
logit (torch.Tensor): (N, C, d_1, d_2, ..., d_K)
target (torch.Tensor): (N, d_1, d_2, ..., d_K)
margin (float):
"""
target = target.unsqueeze(1)
tgt_logit = torch.gather(logit, dim=1, index=target)
loss = logit - tgt_logit + margin
loss = torch.masked_fill(loss, loss < 0, 0)
loss = torch.scatter(loss, dim=1, index=target, value=0)
reduce_fn = REDUCE_FN_MAPPINGS[reduce]
return reduce_fn(loss) | 0eb499d4164b37dee657ad0e0a5c1480324434bc | 9,255 |
import numpy as np
def roc(model, image, mask, ignore=None, sky=None, n_mask=1, seed=1, thresholds=np.linspace(0.001, 0.999, 500),
dilate=False, rad=1):
""" evaluate model on test set with the ROC curve
:param model: deepCR object
:param image: np.ndarray((N, W, H)) image array
:param mask: np.ndarray((N, W, H)) CR mask array
:param ignore: np.ndarray((N, W, H)) bad pixel array incl. saturation, etc.
:param thresholds: np.ndarray(N) FPR grid on which to evaluate ROC curves
:return: np.ndarray(N), np.ndarray(N): TPR and FPR
"""
kernel = None
if dilate:
kernel = disk(rad)
if type(image) == np.ndarray and len(image.shape) == 3:
data = dataset(image, mask, ignore)
elif type(image[0]) == str:
data = DatasetSim(image, mask, sky=sky, n_mask=n_mask, seed=seed)
else:
raise TypeError('Input must be numpy data arrays or list of file paths!')
(tpr, fpr), (tpr_dilate, fpr_dilate) = _roc(model, data, thresholds=thresholds, dilate=kernel)
if dilate:
return (tpr, fpr), (tpr_dilate, fpr_dilate)
else:
return tpr, fpr | 741381b707e4c732202c0cfdac512b13483f533f | 9,259 |
def parse_testconfig(conffile):
"""Parses the config file for the whole testsuite."""
repo_path, drop_caches, tests_dir, testlog_dir = '', '', '', ''
basebranch, baserev, repo_prof_path, repo_gprof_path = '', '', None, None
fileopen = open(conffile, 'r')
for line in fileopen:
line = line.split('#')[0] # Discard comments
if line == '' or line == '\n':
continue # Discard lines with comments only and empty lines
opt, args = line.split(' ', 1) # Get arguments
if opt == 'MOSES_REPO_PATH:':
repo_path = args.replace('\n', '')
elif opt == 'DROP_CACHES_COMM:':
drop_caches = args.replace('\n', '')
elif opt == 'TEST_DIR:':
tests_dir = args.replace('\n', '')
elif opt == 'TEST_LOG_DIR:':
testlog_dir = args.replace('\n', '')
elif opt == 'BASEBRANCH:':
basebranch = args.replace('\n', '')
elif opt == 'BASEREV:':
baserev = args.replace('\n', '')
elif opt == 'MOSES_PROFILER_REPO:': # Optional
repo_prof_path = args.replace('\n', '')
elif opt == 'MOSES_GOOGLE_PROFILER_REPO:': # Optional
repo_gprof_path = args.replace('\n', '')
else:
raise ValueError('Unrecognized option ' + opt)
config = Configuration(repo_path, drop_caches, tests_dir, testlog_dir,\
basebranch, baserev, repo_prof_path, repo_gprof_path)
fileopen.close()
return config | a01e30a0355eac229018c7736e7d9903f59402ed | 9,260 |
import pandas as pd
def get_filtered_df(df, vocab_file):
""" Return a data frame with only the words present in the vocab file. """
if vocab_file:
vocab = open(vocab_file).readlines()
vocab = [v.strip() for v in vocab]
# Get the set of words.
words = pd.Series(df.word.values.ravel()).unique()
set_words = set(words)
# Find the words common to data frame and vocab
common_set_words = set_words & set(vocab)
# Filter the dataframe
df_filtered = df[df.word.isin(common_set_words)]
return df_filtered
else:
return df | 7fbfcfd92adc2b55ad3024b6e31ced743fa9ac50 | 9,261 |
def pkcs5_pad(data):
"""
Pad data using PKCS5
"""
pad = KEYCZAR_AES_BLOCK_SIZE - len(data) % KEYCZAR_AES_BLOCK_SIZE
data = data + pad * chr(pad).encode("utf-8")
return data | c4bb6f28284fe8d5d14f8efcede6858959f1b4cc | 9,262 |
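A short worked example, assuming KEYCZAR_AES_BLOCK_SIZE is 16: a 10-byte input receives 6 bytes of padding, each equal to 0x06.

padded = pkcs5_pad(b"0123456789")
print(len(padded), padded[-1])  # 16 6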
import pickle
def gatherData(data, neat, gen, hyp, fileName, savePop=False):
"""Collects run data, saves it to disk, and exports pickled population
Args:
data - (DataGatherer) - collected run data
neat - (Neat) - neat algorithm container
.pop - [Ind] - list of individuals in population
.species - (Species) - current species
gen - (ind) - current generation
hyp - (dict) - algorithm hyperparameters
savePop - (bool) - save current population to disk?
Return:
data - (DataGatherer) - updated run data
"""
data.gatherData(neat.pop, neat.species)
if (gen % hyp['save_mod']) == 0:
data = checkBest(data, hyp)
data.save(gen)
if savePop is True: # Get a sample pop to play with in notebooks
pref = 'log/' + fileName
with open(pref + '_pop.obj', 'wb') as fp:
pickle.dump(neat.pop, fp)
return data | 56c8a01b2e07280dc17a5fa3d76331e39e112d8d | 9,263 |
import numpy as np
def impute_bad_concentration_fits(c_lgtc, c_lgtc_min=0.1):
"""Overwrite bad concentration parameter fit values."""
c_lgtc = np.where(c_lgtc < c_lgtc_min, c_lgtc_min, c_lgtc)
return c_lgtc | 88f85003a2030ea34cfe72de85c1061981f86957 | 9,264 |
import numpy
import math
def pearsonr(a0, a1):
"""Pearson r, product-moment correlation coefficient, of two samples.
Covariance divided by product of standard deviations.
https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient#For_a_sample
"""
n = len(a0)
assert n == len(a1)
if n == 0:
# No data, so no notion of correlation.
return float('NaN')
a0 = numpy.array(a0)
a1 = numpy.array(a1)
m0 = numpy.mean(a0)
m1 = numpy.mean(a1)
num = numpy.sum((a0 - m0)*(a1 - m1))
den0_sq = numpy.sum((a0 - m0)**2)
den1_sq = numpy.sum((a1 - m1)**2)
den = math.sqrt(den0_sq*den1_sq)
if den == 0.0:
# No variation in at least one column, so no notion of
# correlation.
return float('NaN')
r = num / den
# Clamp r in [-1, +1] in case of floating-point error.
r = min(r, +1.0)
r = max(r, -1.0)
return r | 64135ebc840bb1673ece1aec24f22c960f89af20 | 9,266 |
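A quick sanity check against numpy's own estimator:

import numpy

a = [1.0, 2.0, 3.0, 4.0]
b = [2.0, 4.0, 6.0, 8.1]
print(pearsonr(a, b))              # ~0.9999
print(numpy.corrcoef(a, b)[0, 1])  # same value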
def __column(matrix, i):
"""Returns columns from a bidimensional Python list (a list of lists)"""
return [row[i] for row in matrix] | f455245eb8bbda90f185479afc85eecfb481c70c | 9,267 |
import math
import numpy as np
def datamask(fmri_data, mask_data):
"""
filter the data by a ROI mask
Parameters:
fmri_data : array
The fMRI data.
The shape of fmri_data is [nx, ny, nz]. nx, ny, nz represent the size of the fMRI data.
mask_data : array
The mask data.
The shape of mask_data is [nx, ny, nz]. nx, ny, nz represent the size of the fMRI data.
Returns
-------
newfmri_data : array
The new fMRI data.
The shape of newfmri_data is [nx, ny, nz]. nx, ny, nz represent the size of the fMRI data.
"""
nx, ny, nz = fmri_data.shape
newfmri_data = np.full([nx, ny, nz], np.nan)
for i in range(nx):
for j in range(ny):
for k in range(nz):
if (mask_data[i, j, k] != 0) and (math.isnan(mask_data[i, j, k]) is False):
newfmri_data[i, j, k] = fmri_data[i, j, k]
return newfmri_data | 235c676636b5cff42fba4da539ad83ed7c4f999a | 9,268 |
def make_global_batch_norm_tests(options):
"""Make a set of tests to do batch_norm_with_global_normalization."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 1, 6, 2], [3, 4, 5, 4]],
"epsilon": [0.1, 0.0001],
"scale_after": [True, False],
}]
def build_graph(parameters):
"""Build the global batch norm testing graph."""
input_shape = parameters["input_shape"]
scale_shape = input_shape[3]
scale = create_tensor_data(parameters["dtype"], scale_shape)
offset = create_tensor_data(parameters["dtype"], scale_shape)
mean = create_tensor_data(parameters["dtype"], scale_shape)
variance = create_tensor_data(parameters["dtype"], scale_shape)
x = create_tensor_data(parameters["dtype"], parameters["input_shape"])
x_norm = tf.nn.batch_norm_with_global_normalization(
x, mean, variance, scale, offset, parameters["epsilon"],
parameters["scale_after"])
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.add(input_tensor, x_norm)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs) | eb68bd9cdd09c98471939ea88b8ea60b9772ab90 | 9,269 |
import json
def dict_serialize(seqlen_dist_dict):
"""
dict->str
Turns {1:'a',2:'b'}->"[[1,'a'],[2,'b']]"
Why? Because this format plays nice with shell script that runs xlmr_bench.
Avoids curly braces and spaces that makes shell script str input unhappy.
"""
seqlen_dist_lst = list(seqlen_dist_dict.items())
seqlen_dist_str = json.dumps(seqlen_dist_lst)
seqlen_dist_str = seqlen_dist_str.replace(" ", "") # remove spaces
return seqlen_dist_str | a61c51debff922d128fbb26bbe2121063511d4c4 | 9,271 |
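A round-trip sketch: the string parses back with json.loads (as lists rather than tuples):

import json

s = dict_serialize({1: "a", 2: "b"})
print(s)                    # [[1,"a"],[2,"b"]]
print(dict(json.loads(s)))  # {1: 'a', 2: 'b'}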
def pi_cdecimal():
"""cdecimal"""
D = C.Decimal
lasts, t, s, n, na, d, da = D(0), D(3), D(3), D(1), D(0), D(0), D(24)
while s != lasts:
lasts = s
n, na = n+na, na+8
d, da = d+da, da+32
t = (t * n) / d
s += t
return s | 384bedfc4ca9ba2f869e062581eddc917f9a0104 | 9,272 |
def e_list(a_list: AList) -> set[E]:
"""Unique elements in adjacency list."""
return set(e for n in a_list for nb in a_list[n] for e in a_list[n][nb]) | a59f6170b08faf94d05059f2e77c68f3290acf88 | 9,273 |
def GDAL_QUERY(filename, sql, data={}):
"""
GDAL_QUERY
"""
res = []
sql = sformat(sql, data)
ds = ogr.OpenShared(filename)
if ds:
try:
layer = ds.ExecuteSQL(sql)
definition = layer.GetLayerDefn()
n = definition.GetFieldCount()
for feature in layer:
row = {}
for i in range(n):
fieldname = definition.GetFieldDefn(i).GetName()
row[fieldname] = feature.GetField(fieldname)
res += [row]
except Exception, ex:
print "GDAL_QUERY Exception:", ex
return res | 33e455ef64bf0d168f9d2c03c9ba630a2d9729c3 | 9,275 |
import torch
def solve_maxent_ce(payoffs, steps=1000000, lams=None, lr=None):
"""Calculates the maximum-entropy correlated equilibrium as defined in
Ortiz et al. (2007).
payoffs (torch.Tensor):
Joint payoff tensor.
steps (int, optional):
Number of SGD steps to use in calculations (default: 1000000).
lams (torch.Tensor):
Initialization logits (default: auto-initialized).
lr (float):
SGD learning rate (default: auto-computed).
Ortiz et al., "Maximum entropy correlated equilibria", 2007,
http://proceedings.mlr.press/v2/ortiz07a/ortiz07a.pdf
"""
n = payoffs.size(0)
action_counts = tuple(payoffs.shape[1:])
if lr is None:
tot = 0.0
for i in range(n):
ac = action_counts[i]
payoff_permuted = payoffs[i].transpose(0, i)
gain_mat = payoff_permuted.view(ac, 1, -1) - payoff_permuted.view(1, ac, -1)
tot += torch.abs(gain_mat).sum(dim=0).max().item()
lr = 0.9 / tot
if lams is None:
lams = [(lr * payoffs.new_ones((i, i))) for i in action_counts]
for i in range(n):
rac = torch.arange(action_counts[i])
lams[i][rac, rac] = 0.0
for _ in range(steps):
log_policy = _lams_to_log_policy(lams, payoffs)
policy = torch.exp(log_policy)
pos_regrets = _get_regret(policy, payoffs, positive=True)
neg_regrets = _get_regret(policy, payoffs, positive=False)
eps = 0.5 ** 125
for i in range(n):
ac = action_counts[i]
rac = torch.arange(ac)
chg = ((pos_regrets[i] + eps) / (pos_regrets[i] + neg_regrets[i] + 2 * eps)) - 0.5
chg[rac, rac] = 0.0
lams[i].add_(lr, chg)
lams[i].clamp_(min=0.0)
return policy | 0004b6bbdd5347987c069a68d5baf9a707c85b0c | 9,277 |
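# Hedged usage sketch: payoffs are assumed to be a tensor of shape
# (n_players, *action_counts); here a 2x2 matching-pennies game. The helpers
# _lams_to_log_policy / _get_regret referenced above are not shown here.
import torch
payoffs = torch.tensor([[[1., -1.], [-1., 1.]],     # player 0's payoffs
                        [[-1., 1.], [1., -1.]]])    # player 1's payoffs
# policy = solve_maxent_ce(payoffs, steps=10_000)   # joint distribution over actions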
from math import cos, sqrt
def d_latlon(p1, p2):
    """
    Compute the distance between two points. The original file used a more
    complex and more expensive algorithm; the simpler approximation used here
    loses very little precision.
    """
lon_diff, lat_diff = p1 - p2
lon_diff *= cos((p1[1] + p2[1]) * 0.00872664625997165)
return sqrt(lat_diff * lat_diff + lon_diff * lon_diff) * earth_radians | 53fb2c89f5df196f3ae0fd5ccc67b082b246a580 | 9,278 |
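# Hedged usage sketch: points are assumed to be numpy arrays of (lon, lat) in
# degrees, and `earth_radians` a module-level constant in km per degree of arc
# (R * pi / 180), which is what makes the 0.00872664... = pi/360 factor work out.
import numpy as np
p1 = np.array([116.40, 39.90])   # Beijing  (lon, lat)
p2 = np.array([121.47, 31.23])   # Shanghai (lon, lat)
# d_latlon(p1, p2) ~ 1070 km under the equirectangular approximation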
def _path_list_creator(path, file_prefix_name, number_of_digits_zfill, file_suffix_name):
"""Creates a list of paths where the files have a predefined prefix,
an incremental number and a predefined suffix on their name,
respectively. Eg.: img01.zdf
Args:
path: a path that leads to the files directory
file_prefix_name: a string that comes before the number
number_of_digits_zfill: a number of digits in the number
file_suffix_name: a string that comes after the number
Returns:
list_of_paths: list of appended paths
"""
num = 1
list_of_paths = []
while True:
file_path = path / f"{file_prefix_name}{str(num).zfill(number_of_digits_zfill)}{file_suffix_name}"
list_of_paths.append(file_path)
next_file_path = path / f"{file_prefix_name}{str(num+1).zfill(number_of_digits_zfill)}{file_suffix_name}"
if not next_file_path.exists():
return list_of_paths
num = num + 1 | 4850edbbf544284b0736ee52188bd53119c50fdf | 9,279 |
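# Minimal usage sketch: collects img01.zdf, img02.zdf, ... stopping once the
# next numbered file does not exist on disk.
from pathlib import Path
paths = _path_list_creator(Path("captures"), "img", 2, ".zdf")
# -> [PosixPath('captures/img01.zdf'), PosixPath('captures/img02.zdf'), ...]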
def crosscorrelation(array1, array2, std1, std2, **kwargs):
""" Compute crosscorrelation. """
_ = std1, std2, kwargs
xp = cp.get_array_module(array1) if CUPY_AVAILABLE else np
window = array1.shape[-1]
pad_width = [(0, 0)] * (array2.ndim - 1) + [(window//2, window - window//2)]
padded = xp.pad(array2, pad_width=tuple(pad_width))
accumulator = Accumulator('argmax')
for i in range(window):
corrs = (array1 * padded[..., i:i+window]).sum(axis=-1)
accumulator.update(corrs)
return accumulator.get(final=True).astype(float) - window//2 | b24e3577d2a8d28444a4eefd1ef1d80924f08aaf | 9,281 |
import re
def tamper(payload, **kwargs):
"""
Replaces instances of UNION with -.1UNION
Requirement:
* MySQL
Notes:
* Reference: https://raw.githubusercontent.com/y0unge/Notes/master/SQL%20Injection%20WAF%20Bypassing%20shortcut.pdf
>>> tamper('1 UNION ALL SELECT')
'1-.1UNION ALL SELECT'
>>> tamper('1" UNION ALL SELECT')
'1"-.1UNION ALL SELECT'
"""
return re.sub(r"(?i)\s+(UNION )", r"-.1\g<1>", payload) if payload else payload | cbf4fc5b81bc7760aafe6cf65fa498945285e5bb | 9,284 |
def svn_wc_transmit_prop_deltas(*args):
"""
svn_wc_transmit_prop_deltas(char path, svn_wc_adm_access_t adm_access, svn_wc_entry_t entry,
svn_delta_editor_t editor, void baton,
apr_pool_t pool) -> svn_error_t
"""
return _wc.svn_wc_transmit_prop_deltas(*args) | d92cff618027f3bc763491c7122bdf5187b6ba15 | 9,285 |
from typing import Mapping
from typing import Container
from typing import Set
from typing import Sequence
def _make_immutable(obj):
"""Recursively convert a container and objects inside of it into immutable data types."""
if isinstance(obj, (text_type, binary_type)):
return obj
elif isinstance(obj, Mapping):
temp_dict = {}
for key, value in obj.items():
if isinstance(value, Container):
temp_dict[key] = _make_immutable(value)
else:
temp_dict[key] = value
return ImmutableDict(temp_dict)
elif isinstance(obj, Set):
temp_set = set()
for value in obj:
if isinstance(value, Container):
temp_set.add(_make_immutable(value))
else:
temp_set.add(value)
return frozenset(temp_set)
elif isinstance(obj, Sequence):
temp_sequence = []
for value in obj:
if isinstance(value, Container):
temp_sequence.append(_make_immutable(value))
else:
temp_sequence.append(value)
return tuple(temp_sequence)
return obj | 1f7b51c7b0c5d16dfd9fb0eb10e1ca9410287f85 | 9,286 |
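# Hedged usage sketch; ImmutableDict, text_type and binary_type are assumed to
# come from Ansible's module_utils (this helper mirrors the CLIARGS freezing).
frozen = _make_immutable({"tags": ["a", "b"], "check_mode": True})
# frozen["tags"] == ("a", "b"); the result is hashable and cannot be mutated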
def get_source_tokens_tensor(src_tokens):
"""
To enable integration with PyText, src_tokens should be able to support
more features than just token embeddings. Hence when dictionary features are
passed from PyText it will be passed as a tuple
(token_embeddings, dict_feat, ..). Thus, in this case where we need the source
tokens tensor (eg to calculate batch size = source_tokens_tensor.size(0)),
    we get the first element of the tuple, which is always guaranteed
    to be the source tokens, and do the necessary operation.
    e.g.: bsz = get_source_tokens_tensor(src_tokens).size(0)
"""
if type(src_tokens) is tuple:
return src_tokens[0]
else:
return src_tokens | cf20ceeba82c595dc62b267794ca758360e0386b | 9,287 |
def merge_config_and_args(config, args):
"""
Creates a configuration dictionary based upon command line arguments.
Parameters
----------
config : dict
configurations loaded from the config file
args : object
arguments and there values which could be \
passed in the command line.
Returns
-------
dict
updated configuration dictionary \
with arguments passed in command line.
"""
arg_dict = vars(args)
stripped_dict = {
k: v for k, v in arg_dict.items() if (v is not None)
}
return {**config, **stripped_dict} | 3935cfc525fb99b9513a608ef0e5e8fd7de708f3 | 9,288 |
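# Minimal usage sketch with argparse: flags actually passed on the CLI override
# the config file, while unset flags (None) are ignored.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--lr", type=float)
parser.add_argument("--epochs", type=int)
args = parser.parse_args(["--lr", "0.01"])
config = {"lr": 0.1, "epochs": 10, "seed": 42}
assert merge_config_and_args(config, args) == {"lr": 0.01, "epochs": 10, "seed": 42}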
def contemp2pottemp(salt, tcon, tpot0=None, **rootkwargs):
"""Calculate conservative temp -> potential temp.
Calculate the potential temperature from the absolute salinity and
conservative temperature. Applies either Newton's method or Halley's
method. See `aux.rootfinder` for details on implementation and control
arguments.
Arguments:
salt (float or array): Absolute salinity in g kg-1.
tcon (float or array): Conservative temperature in degrees Celsius.
tpot0 (float or array, optional): Initial estimate of potential
temperature in degrees Celsius. If None (default) then the
conservative temperature is used.
rootkwargs (dict, optional): Additional arguments for the root finder;
see `aux.rootfinder` for available arguments and defaults.
Returns:
tpot (float or array): Potential temperature in degrees Celsius.
"""
# Set initial guess
if tpot0 is None:
tpot0 = tcon
# Set up a function for the rootfinder
update = rootkwargs.get('update', 'newton')
if update == 'newton':
dtpmax = 2
elif update == 'halley':
dtpmax = 3
else:
raise ValueError(
'The update method must be either "newton" or "halley"')
y0 = CSEA*tcon
args = (salt,)
def derfun(tpot, salt):
# Calculate Gibbs function *with adjusted coefficients*
(g0s, *__) = gibbs0(salt, tpot, dtpmax, orig=False)
tabs = TCELS + tpot
hs = [g0s[0]-tabs*g0s[1], -tabs*g0s[2]]
if dtpmax > 2:
hs.append(-g0s[2] - tabs*g0s[3])
return hs
# Apply the root-finding method
tpot = aux.rootfinder(
derfun, y0, tpot0, TMIN, CSEA*TMIN, args, **rootkwargs)
return tpot | fd627f1561e21daaa18f9d84c0fc12d5ab87e7e5 | 9,289 |
import random
import string
def get_random_string(length: int) -> str:
"""
With combination of lower and upper case
"""
return ''.join(random.choice(string.ascii_letters) for i in range(length)) | b9d0c760e92603a4fe1f625615b96a1c2265f22a | 9,290 |
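# Quick usage sketch.
token = get_random_string(16)
assert len(token) == 16 and token.isalpha()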
import errno
def _write_callback(connection_id, data_buffer, data_length_pointer):
"""
Callback called by Secure Transport to actually write to the socket
:param connection_id:
        An integer identifying the connection
:param data_buffer:
A char pointer FFI type containing the data to write
:param data_length_pointer:
A size_t pointer FFI type of the amount of data to write. Will be
overwritten with the amount of data actually written on return.
:return:
An integer status code of the result - 0 for success
"""
try:
self = _connection_refs.get(connection_id)
if not self:
socket = _socket_refs.get(connection_id)
else:
socket = self._socket
if not self and not socket:
return 0
data_length = deref(data_length_pointer)
data = bytes_from_buffer(data_buffer, data_length)
if self and not self._done_handshake:
self._client_hello += data
error = None
try:
sent = socket.send(data)
except (socket_.error) as e:
error = e.errno
if error is not None and error != errno.EAGAIN:
if error == errno.ECONNRESET or error == errno.EPIPE:
return SecurityConst.errSSLClosedNoNotify
return SecurityConst.errSSLClosedAbort
if sent != data_length:
pointer_set(data_length_pointer, sent)
return SecurityConst.errSSLWouldBlock
return 0
except (KeyboardInterrupt) as e:
self._exception = e
return SecurityConst.errSSLPeerUserCancelled | 4daa1130c18b28abe92b5a550d1aac734f74d3dc | 9,291 |
import functools
import pickle
def cache(**kwargs):
"""
Cache decorator.
Should be called with `@cache(ttl_sec=123, transform=transform_response)`
Arguments:
ttl_sec: optional,number The time in seconds to cache the response if
status code < 400
transform: optional,func The transform function of the wrapped function
to convert the function response to request response
Usage Notes:
If the wrapped function returns a tuple, the transform function will not
be run on the response. The first item of the tuple must be serializable.
If the wrapped function returns a single response, the transform function
must be passed to the decorator. The wrapper function response must be
serializable.
Decorators in Python are just higher-order-functions that accept a function
as a single parameter, and return a function that wraps the input function.
In this case, because we need to pass kwargs into our decorator function,
we need an additional layer of wrapping; the outermost function accepts the kwargs,
and when called, returns the decorating function `outer_wrap`, which in turn returns
the wrapped input function, `inner_wrap`.
@functools.wraps simply ensures that if Python introspects `inner_wrap`, it refers to
`func` rather than `inner_wrap`.
"""
ttl_sec = kwargs["ttl_sec"] if "ttl_sec" in kwargs else default_ttl_sec
transform = kwargs["transform"] if "transform" in kwargs else None
redis = redis_connection.get_redis()
def outer_wrap(func):
@functools.wraps(func)
def inner_wrap(*args, **kwargs):
has_user_id = 'user_id' in request.args and request.args['user_id'] is not None
key = extract_key(request.path, request.args.items())
if not has_user_id:
cached_resp = redis.get(key)
if cached_resp:
logger.info(f"Redis Cache - hit {key}")
try:
deserialized = pickle.loads(cached_resp)
if transform is not None:
return transform(deserialized)
return deserialized, 200
except Exception as e:
logger.warning(f"Unable to deserialize cached response: {e}")
logger.info(f"Redis Cache - miss {key}")
response = func(*args, **kwargs)
if len(response) == 2:
resp, status_code = response
if status_code < 400:
serialized = pickle.dumps(resp)
redis.set(key, serialized, ttl_sec)
return resp, status_code
serialized = pickle.dumps(response)
redis.set(key, serialized, ttl_sec)
return transform(response)
return inner_wrap
return outer_wrap | 10be4de3f0c6125fb502e2b3598bce18eff52375 | 9,292 |
def RegisterTensorTransformer(name):
"""Registers a dataset."""
def decorator(obj):
TENSOR_TRANSFORMER_REGISTRY[name] = obj
obj.name = name
return obj
return decorator | e033e09ff5172175328c02638a07e9b0ae112615 | 9,293 |
from pathlib import Path
import copy
import threading
import shutil
def handler_factory(
jinja_template_rendered: BytesIO,
base_dir: Path,
events: list = None,
username: str = "thqm",
password: str = None,
oneshot: bool = False,
allow_custom_events: bool = False,
):
"""Create a HTTPHandler class with the desired properties.
    Events should appear following the url parameter 'event', controlling the
server is done through the 'command' url parameter.
Args:
jinja_template_rendered: BytesIO object of the rendered template.
base_dir: directory containing the static/ and templates/ folders.
events: allowed events.
username: basic auth username.
password: basic auth password.
oneshot: stop server after first click.
allow_custom_events: the server will echo the event regardless of it
being in the events list.
Returns:
HTTPHandler class.
"""
class HTTPHandler(BaseHTTPRequestHandler):
extensions_map = {
".html": "text/html",
"": "application/octet-stream", # Default
".css": "text/css",
".js": "text/javascript",
".png": "image/png",
".jpg": "image/jpeg",
".jpeg": "image/jpeg",
".svg": "image/svg+xml",
}
def __init__(self, *args, **kwargs):
if events is None:
self.events = []
else:
self.events = events
self.require_login = password is not None
self._auth = b64encode(f"{username}:{password}".encode()).decode()
super().__init__(*args, **kwargs)
def _do_GET(self):
f_obj = self.send_head()
if f_obj:
self.copyfile(f_obj, self.wfile)
f_obj.close()
def do_GET(self):
"""Serve a GET request."""
if self.require_login:
if self.headers.get("Authorization") == "Basic " + self._auth:
self._do_GET()
else:
self.do_HEADAUTH()
else:
self._do_GET()
def do_HEAD(self):
"""Serve a HEAD request."""
f_obj = self.send_head()
if f_obj:
f_obj.close()
def do_HEADAUTH(self):
"""Handle the authentication in the header."""
self.send_response(401)
self.send_header("WWW-Authenticate", 'Basic realm="thqm"')
self.send_header("Content-type", "text/html")
self.end_headers()
def reset(self):
"""Redirect to /."""
self.send_response(302)
self.send_header("Location", "/")
self.end_headers()
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
parsed_path = urlparse(self.path)
if parsed_path.query:
query = parse_qs(parsed_path.query)
if "event" in query:
event = query["event"][0]
if allow_custom_events or event in self.events:
echo(event)
if oneshot:
self.shutdown()
else:
self.reset()
if "command" in query:
command = query["command"][0]
if command == "shutdown":
self.shutdown()
path = unquote(parsed_path.path)
f_obj = None
ctype = None
if path == "/":
# if main page
f_obj = copy(jinja_template_rendered)
ctype = "text/html"
else:
try:
f_obj = open(base_dir / path[1:], "rb")
except IOError:
pass
if f_obj is not None:
if not ctype:
ctype = self.guess_type(path)
self.send_response(200)
self.send_header("Content-type", ctype)
self.end_headers()
return f_obj
@staticmethod
def translate_path(path: str) -> str:
"""Cleanup path."""
# abandon query parameters
path = path.split("?", 1)[0]
path = path.split("#", 1)[0]
# remove first /
return unquote(path)[1:]
@staticmethod
def get_query(path: str) -> str:
"""Get the first query parameter."""
paths = path.split("?", 1)
if len(paths) > 1:
return paths[1]
return ""
def shutdown(self):
"""Shutdown the server."""
killer = threading.Thread(target=self.server.shutdown)
killer.start()
@staticmethod
def copyfile(source, outputfile):
"""Copy all data between two file objects.
The SOURCE argument is a file object open for reading
(or anything with a read() method) and the DESTINATION
argument is a file object open for writing (or
anything with a write() method).
The only reason for overriding this would be to change
the block size or perhaps to replace newlines by CRLF
-- note however that this the default server uses this
to copy binary data as well.
"""
shutil.copyfileobj(source, outputfile)
def guess_type(self, path: str) -> str:
"""Guess the type of a file.
Argument is a PATH (a filename).
Return value is a string of the form type/subtype,
usable for a MIME Content-type header.
The default implementation looks the file's extension
up in the table self.extensions_map, using application/octet-stream
as a default; however it would be permissible (if
slow) to look inside the data to make a better guess.
"""
ext = Path(path).suffix.lower()
return self.extensions_map.get(ext, self.extensions_map[""])
def log_message(self, *args, **kwargs):
"""Disable all prints."""
return HTTPHandler | 119f1ecd6ba6b3172087f85091360f5e5c0c909d | 9,295 |
def add():
"""This is a temporary function to allow users to easily add tracks, mainly for testing."""
form = SQLFORM(db.memo)
if form.process().accepted:
redirect(URL('default', 'index'))
return dict(form=form) | 9ae3f5f707b880667790fa1396e25999188a6c68 | 9,296 |
from inmanta_plugins.terraform.helpers.const import (
TERRAFORM_RESOURCE_STATE_PARAMETER,
)
from typing import Callable
from typing import Optional
from typing import Dict
from typing import List
from pathlib import Path
async def test_update_failed(
project: Project,
server: Server,
client: Client,
environment: str,
agent_factory: Callable[
[UUID, Optional[str], Optional[Dict[str, str]], bool, List[str]], Agent
],
function_temp_dir: str,
cache_agent_dir: str,
):
"""
    This test creates a file, then updates it by moving it to a forbidden location. The update should fail
but the param containing the state should be updated anyway, showing the current file state, which is null.
"""
file_path_object = Path(function_temp_dir) / Path("test-file.txt")
provider = LocalProvider()
local_file = LocalFile(
"my file", str(file_path_object), "my original content", provider
)
await agent_factory(
environment=environment,
hostname="node1",
agent_map={provider.agent: "localhost"},
code_loader=False,
agent_names=[provider.agent],
)
def model(purged: bool = False) -> str:
m = (
"\nimport terraform\n\n"
+ provider.model_instance("provider")
+ "\n"
+ local_file.model_instance("file", purged)
)
LOGGER.info(m)
return m
assert not file_path_object.exists()
# Create
create_model = model()
project.compile(create_model)
resource: Resource = project.get_resource(
local_file.resource_type, resource_name="my file"
)
assert resource is not None
resource_id = Id.resource_str(resource.id)
async def get_param_short() -> Optional[str]:
return await get_param(
environment=environment,
client=client,
param_id=TERRAFORM_RESOURCE_STATE_PARAMETER,
resource_id=resource_id,
)
assert (
await get_param_short() is None
), "There shouldn't be any state set at this point for this resource"
assert (
await deploy_model(project, create_model, client, environment)
== VersionState.success
)
assert await get_param_short() is not None, "A state should have been set by now"
# Update
forbidden_path_object = Path("/dev/test-file.txt")
local_file.path = str(forbidden_path_object)
update_model = model()
assert (
await deploy_model(project, update_model, client, environment)
== VersionState.failed
)
param = await get_param_short()
assert param is not None, "The state should still be there"
assert param == "null", "The state should be empty as the new file couldn't deploy"
# Delete
delete_model = model(True)
assert (
await deploy_model(project, delete_model, client, environment)
== VersionState.success
)
assert (
await get_param_short() is None
), "The state should have been removed, but wasn't" | 969fb6136ecf2fd1adc3651aaba6d5b44e795e70 | 9,297 |
def is_valid_mac(mac):
"""
Validate mac address
:param mac:
:return: boolean
"""
res = False
if isinstance(mac, str):
if mac:
res = mac_address.match(mac.lower()) is not None
return res | 2d89a6afbe76d99d6d7cf3e1bfa2e9954c5f2a20 | 9,298 |
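# Hedged sketch of the assumed module-level `mac_address` pattern plus usage;
# the real pattern may differ (e.g. it might also accept dash separators).
import re
mac_address = re.compile(r"^([0-9a-f]{2}:){5}[0-9a-f]{2}$")
assert is_valid_mac("AA:BB:CC:DD:EE:0F") is True
assert is_valid_mac("not-a-mac") is False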
def get_model(tokenizer, lstm_units):
"""
Constructs the model,
Embedding vectors => LSTM => 2 output Fully-Connected neurons with softmax activation
"""
# get the GloVe embedding vectors
embedding_matrix = get_embedding_vectors(tokenizer)
model = Sequential()
model.add(Embedding(len(tokenizer.word_index)+1,
EMBEDDING_SIZE,
weights=[embedding_matrix],
trainable=False,
input_length=SEQUENCE_LENGTH))
model.add(LSTM(lstm_units, recurrent_dropout=0.2))
model.add(Dropout(0.3))
model.add(Dense(2, activation="softmax"))
    # compile with the rmsprop optimizer
    # as well as with the precision and recall metrics
model.compile(optimizer="rmsprop", loss="categorical_crossentropy",
metrics=["accuracy", keras_metrics.precision(), keras_metrics.recall()])
model.summary()
return model | fd8352081898b4fcffe122a7058d0069caa7ab21 | 9,299 |
import cv2
def annotation_multi_vertical_height(_img, _x, _y_list, _line_color, _text_color, _text_list,
_thickness=1,
_with_arrow=True):
"""
    Vertically annotate multiple heights.
    :param _img: image to annotate
    :param _x: x position (width) of the vertical line
    :param _y_list: list of all y coordinates
    :param _line_color: line color (BGR)
    :param _text_color: text color (BGR)
    :param _text_list: list of all texts to display
    :param _thickness: line thickness
    :param _with_arrow: whether both ends of each segment get arrow heads
    :return: annotated image
    """
    assert len(_y_list) - 1 == len(_text_list), 'number of segments does not match number of texts'
    to_return_img = _img.copy()
    # Need to draw:
    # 1. double-headed arrow segments
    # 2. end tick lines at each y position
    # 3. the text associated with each segment
for m_start_y, m_end_y, m_text in zip(_y_list[:-1], _y_list[1:], _text_list):
if _with_arrow:
cv2.arrowedLine(to_return_img, (_x, m_start_y), (_x, m_end_y), _line_color, thickness=_thickness)
cv2.arrowedLine(to_return_img, (_x, m_end_y), (_x, m_start_y), _line_color, thickness=_thickness)
else:
cv2.line(to_return_img, (_x, m_start_y), (_x, m_end_y), _line_color, thickness=_thickness)
cv2.line(to_return_img, (_x, m_end_y), (_x, m_start_y), _line_color, thickness=_thickness)
text_start_x = _x + 10
text_start_y = m_start_y + (m_end_y - m_start_y) // 2
to_return_img = __annotation_text_on_image(to_return_img, (text_start_x, text_start_y), _text_color, m_text)
for m_y in _y_list:
cv2.line(to_return_img, (_x - 12, m_y), (_x + 12, m_y), _line_color, thickness=_thickness)
return to_return_img | 2e181eddee2dea969b14dc18f910d4c5f82fb371 | 9,300 |
async def list_(hub, ctx, registry_name, resource_group, **kwargs):
"""
.. versionadded:: 3.0.0
Lists all the replications for the specified container registry.
:param registry_name: The name of the container registry.
:param resource_group: The name of the resource group to which the container registry belongs.
CLI Example:
.. code-block:: bash
azurerm.containerregistry.replication.list testrepo testgroup
"""
result = {}
regconn = await hub.exec.azurerm.utils.get_client(
ctx, "containerregistry", **kwargs
)
try:
repls = await hub.exec.azurerm.utils.paged_object_to_list(
regconn.replications.list(
registry_name=registry_name, resource_group_name=resource_group
)
)
for repl in repls:
result[repl["name"]] = repl
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error(
"containerregistry", str(exc), **kwargs
)
result = {"error": str(exc)}
return result | aa24ab14278e49da35fe6851d71e6d375f763b4d | 9,301 |
from typing import Union
def latest_window_partition_selector(
context: ScheduleEvaluationContext, partition_set_def: PartitionSetDefinition[TimeWindow]
) -> Union[SkipReason, Partition[TimeWindow]]:
"""Creates a selector for partitions that are time windows. Selects latest time window that ends
before the schedule tick time.
"""
partitions = partition_set_def.get_partitions(context.scheduled_execution_time)
if len(partitions) == 0:
return SkipReason()
else:
return partitions[-1] | bac6fe78b0111cdf6272c7bf08a0d555971c20a5 | 9,303 |
def html(i):
"""
Input: {
(skip_cid_predix) - if 'yes', skip "?cid=" prefix when creating URLs
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
d=i.get('dict',{})
scp=i.get('skip_cid_prefix','')
bscp=(scp=="yes")
short=i.get('short','')
llm=d.get('meta',{})
llmisc=llm.get('misc',{})
lldict=llm.get('dict',{})
repo_url1=llmisc.get('repo_url1','')
repo_url2=llmisc.get('repo_url2','')
repo_url3=llmisc.get('repo_url3','')
duoa=llmisc.get('data_uoa','')
duid=llmisc.get('data_uid','')
ruoa=llmisc.get('repo_uoa','')
ruid=llmisc.get('repo_uid','')
muid=llmisc.get('module_uid','')
muoa=llmisc.get('module_uoa','')
#Main
title=llmisc.get('title','')
authors=llmisc.get('authors','')
where=llmisc.get('where','')
paper_pdf_url=llmisc.get('paper_pdf_url','')
paper_doi_url=llmisc.get('paper_doi_url','')
artifact_doi_url=llmisc.get('artifact_doi_url','')
workflow=llmisc.get('workflow','')
workflow_url=llmisc.get('workflow_url','')
h=''
article=''
if title!='':
article='<b>'+title+'</b>'
if authors!='':
h+='<div id="ck_entries_space4"></div>\n'
h+='<i>'+authors+'</i>\n'
baaa=llmisc.get('badge_acm_artifact_available','')
baaf=llmisc.get('badge_acm_artifact_functional','')
baar=llmisc.get('badge_acm_artifact_reusable','')
barr=llmisc.get('badge_acm_results_reproduced','')
barp=llmisc.get('badge_acm_results_replicated','')
badges=''
if baaa!='':
badges+=' <a href="http://cTuning.org/ae/reviewing.html#artifacts_available"><img src="https://www.acm.org/binaries/content/gallery/acm/publications/replication-badges/artifacts_available_dl.jpg" width="64"></a>'
if baaf!='':
badges+=' <a href="http://cTuning.org/ae/reviewing.html#artifacts_functional"><img src="https://www.acm.org/binaries/content/gallery/acm/publications/replication-badges/artifacts_evaluated_functional_dl.jpg" width="64"></a>'
if baar!='':
badges+=' <a href="http://cTuning.org/ae/reviewing.html#artifacts_reusable"><img src="https://www.acm.org/binaries/content/gallery/acm/publications/replication-badges/artifacts_evaluated_reusable_dl.jpg" width="64"></a>'
if barr!='':
badges+=' <a href="http://cTuning.org/ae/reviewing.html#results_validated"><img src="https://www.acm.org/binaries/content/gallery/acm/publications/replication-badges/results_reproduced_dl.jpg" width="64"></a>'
if barp!='':
badges+=' <a href="http://cTuning.org/ae/reviewing.html#results_validated"><img src="https://www.acm.org/binaries/content/gallery/acm/publications/replication-badges/results_replicated_dl.jpg" width="64"></a>'
if workflow.lower()=='ck':
x1=''
x2=''
if workflow_url!='':
x1='<a href="'+workflow_url+'">'
x2='</a>'
badges+=' '+x1+'<img src="https://ctuning.org/ae/stamps/ck-workflow.png" width="100">'+x2
if badges!='':
h+='<div id="ck_entries_space4"></div>\n'
h+='<center>'+badges+'</center>\n'
h1=''
if short!='yes':
h+='<div style="background-color:#efefef;margin:5px;padding:5px;">\n'
url0=i.get('url','')
urlc=url0.replace('index.php','c.php') # Needed for components
# x1=''
# x2=''
# if url0!='' and ruid!='':
# prfx=''
# if not bscp: prfx='cid='
# x1='<a href="'+url0+prfx+cfg['module_deps']['component.repo']+':'+ruid+'" target="_blank">'
# x2='</a>'
# h+='<b>Repo name:</b> '+x1+ruoa+x2+'<br>\n'
where_url=llmisc.get('where_url','')
if where!='':
x1=''
x2=''
if where_url!='':
x1='<a href="'+where_url+'">'
x2='</a>'
h+='<b>Where published:</b> '+x1+where+x2+'<br>\n'
if paper_doi_url!='':
x=paper_doi_url
j=paper_doi_url.find('doi.org/')
if j>0: x=paper_doi_url[j+8:]
h+='<b>Article DOI:</b> <a href="'+paper_doi_url+'">'+x+'</a><br>\n'
if paper_pdf_url!='':
h+='<b>Article:</b> <a href="'+paper_pdf_url+'">PDF</a><br>\n'
if artifact_doi_url!='':
x=artifact_doi_url
j=artifact_doi_url.find('doi.org/')
if j>0: x=artifact_doi_url[j+8:]
h+='<b>Artifact DOI:</b> <a href="'+artifact_doi_url+'">'+x+'</a><br>\n'
uaa=llmisc.get('unified_artifact_appendix','')
if uaa!='':
h+='<b>Unified artifact appendix:</b> <a href="'+uaa+'">Link</a><br>\n'
arts=llmisc.get('artifact_sources','')
arts_url=llmisc.get('artifact_sources_url','')
if arts_url!='':
x=arts_url
if arts!='': x=arts
h+='<b>Artifact before standardization:</b> <a href="'+arts_url+'">'+x+'</a><br>\n'
if workflow_url!='':
x=workflow_url
y='Automated workflow'
if workflow!='':
x=workflow
if x=='CK':
x='Link'
y='Standardized CK workflow'
h+='<b>'+y+':</b> <a href="'+workflow_url+'">'+x+'</a>\n'
ck_repo_uid=llmisc.get('ck_repo_uid','')
if ck_repo_uid!='':
prfx=''
if not bscp: prfx='cid='
x=urlc+prfx+cfg['module_deps']['component.repo']+':'+ck_repo_uid
h+=' (<a href="'+x+'">ReproIndex</a>)\n'
h+='<br>\n'
tasks=llmisc.get('tasks',{})
if len(tasks)>0:
h+='<b>Standardized CK pipelines (programs):</b><br>\n'
h+='<div style="margin-left:20px;">\n'
h+=' <ul>\n'
for tuid in tasks:
tt=tasks[tuid]
tuoa=tt.get('data_uoa','')
if tuoa!='':
prfx=''
if not bscp: prfx='cid='
x='<a href="'+urlc+prfx+cfg['module_deps']['component.program']+':'+tuid+'" target="_blank">'+tuoa+'</a>'
h+=' <li><span style="color:#2f0000;">'+x+'</li>\n'
h+=' </ul>\n'
h+='</div>\n'
results=llmisc.get('results','')
results_url=llmisc.get('results_url','')
if results_url!='':
x=results_url
if results!='': x=results
h+='<b>Reproducible results:</b> <a href="'+results_url+'">'+x+'</a><br>\n'
some_results_replicated=llmisc.get('some_results_replicated','')
if some_results_replicated=='yes':
h+='<b>Some results replicated:</b> ✔<br>\n'
rurl=llmisc.get('reproducibility_url','')
if rurl!='':
x='Link'
if 'acm' in rurl.lower() or 'ctuning' in rurl.lower():
x='ACM and cTuning'
h+='<b>Reproducible methodology:</b> <a href="'+rurl+'">'+x+'</a><br>\n'
results_dashboard_url=llmisc.get('results_dashboard_url','')
if results_dashboard_url!='':
x=results_dashboard_url
j=x.find('://')
if j>=0:
x=x[j+3:]
h+='<b>Dashboard with results:</b> <a href="'+results_dashboard_url+'">'+x+'</a><br>\n'
h+='</div>\n'
# Extras
h1=''
if paper_doi_url!='':
h1+='[ <a href="'+paper_doi_url+'" target="_blank">paper</a> ] \n'
# ck_repo_uid=llmisc.get('ck_repo_uid','')
# if ck_repo_uid!='':
# prfx=''
# if not bscp: prfx='cid='
# x=urlc+prfx+cfg['module_deps']['component.repo']+':'+ck_repo_uid
# h1+='[ <a href="'+x+'" target="_blank">CK repository</a> ] \n'
return {'return':0, 'html':h, 'html1':h1, 'article':article} | a2effe3ac9cf9fb8678283cb9d23cf574bc54700 | 9,305 |
def multi_particle_first_np_metafit(n):
"""Fit to plots of two-body matrix elements from various normal-ordering
schemes, where only the first n points are taken from each scheme
"""
    name = 'multi_particle_first_{}p_metafit'.format(n)
def mpfnp(fitfn, exp_list, **kwargs):
return multi_particle_metafit_int(
fitfn, exp_list,
sourcedir=DPATH_FILES_INT, savedir=DPATH_PLOTS,
transform=first_np(n),
super_transform_post=s_combine_like(['interaction']),
code='mpf{}p'.format(n), mf_name=name,
xlabel='A', ylabel='Energy (MeV)', **kwargs
)
mpfnp.__name__ = name
return mpfnp | 384b4d7a1627e554e3ba1583236dbb8fde136b9c | 9,306 |
from typing import Union
from pathlib import Path
from typing import List
from typing import Dict
import json
def readJSONLFile(file_name: Union[str, Path]) -> List[Dict]:
"""
Read a '.jsonl' file and create a list of dicts
Args:
file_name: `Union[str,Path]`
The file to open
Returns:
The list of dictionaries read from the 'file_name'
"""
lines = (
open(file_name, 'r', encoding='utf-8').readlines() if isinstance(file_name, str) else
file_name.read_text('utf-8').splitlines(False)
)
return [json.loads(line) for line in lines] | 8e33fad766a255578179828dc76ec793c02f90b9 | 9,307 |
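# Minimal usage sketch writing and then reading back a small .jsonl file.
import json, tempfile
from pathlib import Path
tmp = Path(tempfile.mkdtemp()) / "sample.jsonl"
tmp.write_text("\n".join(json.dumps(d) for d in [{"a": 1}, {"a": 2}]), "utf-8")
assert readJSONLFile(tmp) == [{"a": 1}, {"a": 2}]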
def _dtype_from_cogaudioformat(format: CogAudioFormat) -> np.dtype:
"""This method returns the numpy "data type" for a particular audio format."""
if COG_AUDIO_IS_INT(format):
if COG_AUDIO_FORMAT_DEPTH(format) == COG_AUDIO_FORMAT_DEPTH_S24:
return np.dtype(np.uint8)
elif COG_AUDIO_FORMAT_SAMPLEBYTES(format) == 2:
return np.dtype(np.int16)
elif COG_AUDIO_FORMAT_SAMPLEBYTES(format) == 4:
return np.dtype(np.int32)
elif COG_AUDIO_FORMAT_SAMPLEBYTES(format) == 8:
return np.dtype(np.int64)
elif COG_AUDIO_IS_FLOAT(format):
return np.dtype(np.float32)
elif COG_AUDIO_IS_DOUBLE(format):
return np.dtype(np.float64)
raise NotImplementedError("Cog Audio Format not amongst those supported for numpy array interpretation") | d41b01fddd798eaa526e767775138e4a4e3ce718 | 9,308 |
def makeSiteWhitelist(jsonName, siteList):
"""
Provided a template json file name and the site white list from
the command line options; return the correct site white list based
on some silly rules
"""
if 'LHE_PFN' in jsonName:
siteList = ["T1_US_FNAL"]
print("Overwritting SiteWhitelist to: %s" % siteList)
elif 'LHE' in jsonName or 'DQMHarvest' in jsonName:
siteList = ["T2_CH_CERN"]
print("Overwritting SiteWhitelist to: %s" % siteList)
return siteList | 8f8b11739a30b4338b8dd31afb6c3c57545af6d0 | 9,309 |
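# Minimal usage sketch (the function also prints when it overrides the list).
assert makeSiteWhitelist("LHE_PFN_template.json", ["T2_DE_DESY"]) == ["T1_US_FNAL"]
assert makeSiteWhitelist("TaskChain_template.json", ["T2_DE_DESY"]) == ["T2_DE_DESY"]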
import json
import jsonschema
def loadConfig(configFilePath: str) -> {}:
"""Loads configuration"""
config = {}
with open(configFilePath) as configFile:
config = json.load(configFile)
configSchema = {}
with open(CONFIG_SCHEMA_FILE_PATH, "r") as configSchemaFile:
configSchema = json.load(configSchemaFile)
jsonschema.validate(instance=config, schema=configSchema)
return config | d5e1cbd3bc1f61d329f26a40d9dff5b14ca76f22 | 9,310 |
def version_info():
"""
Get version of vakt package as tuple
"""
return tuple(map(int, __version__.split('.'))) | 446a637134484e835f522f2f67c19110796f503d | 9,311 |
from typing import List
def max_crossing_sum(lst: List[int], mid: int, n: int) -> int:
"""
Parameter <mid> is the floor middle index of <lst>.
Parameter <n> is the length of the input list <lst>.
Pre: <lst> is a list of integers and len(lst) >= 2.
Post: returns the maximum contiguous crossing sum starting from the middle of <lst>.
>>> max_crossing_sum([2, -5, 8, -6, 10, -2], 3, 6)
12
"""
left_sum, right_sum, total = 0, 0, 0 # initialize values
# max sum of the left half
k = mid - 1
i = 0
while i < mid:
total += lst[k - i]
i += 1
if total > left_sum:
left_sum = total
# # max sum the left half
# for i in range(mid - 1, -1, -1): # iterate from index mid - 1...0 backward
# total += lst[i]
# if total > left_sum:
# left_sum = total
total = 0
# max sum the right half
for i in range(mid, n): # iterate from index mid...n - 1
total += lst[i]
if total > right_sum:
right_sum = total
# note: left_sum and right_sum are each at least zero
return left_sum + right_sum | 3d873907cb7ed0c14152ec3c2e92a742bd52aa85 | 9,313 |
def getPrimaryHostIp():
"""
Tries to figure out the primary (the one with default route), local
IPv4 address.
Returns the IP address on success and otherwise '127.0.0.1'.
"""
#
# This isn't quite as easy as one would think. Doing a UDP connect to
# 255.255.255.255 turns out to be problematic on solaris with more than one
    # network interface (the IP seems to be selected at random), as well as linux
# where we've seen 127.0.1.1 being returned on some hosts.
#
# So a modified algorithm first try a known public IP address, ASSUMING
# that the primary interface is the one that gets us onto the internet.
# If that fails, due to routing or whatever, we try 255.255.255.255 and
# then finally hostname resolution.
#
sHostIp = getPrimaryHostIpByUdp('8.8.8.8');
if sHostIp.startswith('127.'):
sHostIp = getPrimaryHostIpByUdp('255.255.255.255');
if sHostIp.startswith('127.'):
sHostIp = getPrimaryHostIpByHostname();
return sHostIp; | 127eeb80c21f766c3b877fc6fdfc05aed9bf50ca | 9,315 |
def localize(_bot, _msg, *args, _server=None, _channel=None, **kwargs):
""" Localize message to current personality, if it supports it. """
global messages
# Find personality and check if personality has an alternative for message.
personality = config.get('personality', _server or _current_server, _channel or _current_channel)
if personality and personality in messages_ and _msg in messages_[personality]:
# Replace message.
_msg = messages_[personality][_msg]
kw = _bot.FORMAT_CODES.copy()
kw.update(kwargs)
return _msg.format(*args, **kw) | ba2300388afee37d4bf40dc2ac9fc6f4f04731fa | 9,317 |
def list_events():
"""Show a view with past and future events."""
if "username" not in session:
return redirect("/")
events = actions.get_upcoming_events()
past_events = actions.get_past_events()
return render_template("events.html", count=len(events), past_count=len(past_events),
events=events, past_events=past_events, events_view=True, mode="3") | a4ab3207943ccd302aab6a0785de4cc4a4609994 | 9,319 |
def get_duration(df):
"""Get duration of ECG recording
Args:
df (DataFrame): DataFrame with time/voltage data
Returns:
float: duration of ECG recording
"""
start = df.time.iloc[0]
end = df.time.iloc[-1]
duration = end - start
return duration | 77698afc8ef7af557628d5fea760dc101c3e6112 | 9,321 |
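# Minimal usage sketch with a tiny time/voltage DataFrame.
import pandas as pd
df = pd.DataFrame({"time": [0.0, 0.5, 1.0, 1.5], "voltage": [0, 1, 0, -1]})
assert get_duration(df) == 1.5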
import json
def create_task():
"""Create new post"""
global post_id_counter
body = json.loads(request.data)
title = body.get("title")
link = body.get("link")
username = body.get("username")
if not title or not link or not username:
return json.dumps({"error": "Missing fields in the body"}), 400
post = {
"id": post_id_counter,
"upvotes": 1,
"title": title,
"link": link,
"username": username,
"comments": {}
}
posts[post_id_counter] = post
post_id_counter += 1
return json.dumps(post), 201 | bace1881a104e41d83842992fc7818f2c2a213ac | 9,323 |
def _asklong(*args):
"""_asklong(sval_t value, char format, v(...) ?) -> int"""
return _idaapi._asklong(*args) | f80d4db85461cd3e13de2cfc6006385419729bec | 9,324 |
def describe_bivariate(data:pd.DataFrame,
only_dependent:bool = False,
size_max_sample:int = None,
is_remove_outliers:bool = True,
alpha:float = 0.05,
max_num_rows:int = 5000,
max_size_cats:int = 5,
verbose:bool = False)->pd.DataFrame:
"""
Describe bivariate relationships.
    df -- data to be analyzed.
    only_dependent -- only display relationships with dependencies (default, False).
    size_max_sample -- maximum sample size for applying the analysis to the whole sample. If this value
                       is not None, random subsamples are used, although bivariate
                       outliers will not be removed (default, None).
is_remove_outliers -- Remove or not univariate outliers (default, True).
alpha -- significance level (default, 0.05).
max_num_rows -- maximum number of rows allowed without considering a sample (default, 5000).
max_size_cats -- maximum number of possible values in a categorical variable to be allowed (default, 5).
return -- results in a table.
"""
# data preparation
df = preparation(data, max_num_rows, max_size_cats, verbose = True)
# relationship num - num
dfnn = analysis_num_num(df, only_dependent = only_dependent, size_max_sample = size_max_sample,
is_remove_outliers = is_remove_outliers, alpha = alpha, verbose = verbose)
# relationship cat - cat
dfcc = analysis_cat_cat(df, only_dependent = only_dependent, alpha = alpha, verbose = verbose)
# relationship cat - num
dfcn = analysis_cat_num(df, only_dependent = only_dependent, alpha = alpha,
is_remove_outliers = is_remove_outliers, verbose = verbose)
# append results
dfbiv = dfnn.copy()
dfbiv = dfbiv.append(dfcc)
dfbiv = dfbiv.append(dfcn)
# return
return dfbiv | 4754b106cab60dd02ab32b0705802d9459c28593 | 9,325 |
def devilry_multiple_examiners_short_displayname(assignment, examiners, devilryrole):
"""
Returns the examiners wrapped in HTML formatting tags perfect for showing
the examiners inline in a non-verbose manner.
Typically used for showing all the examiners in an
:class:`devilry.apps.core.models_group.AssignmentGroup`.
Handles anonymization based on ``assignment.anonymizationmode`` and ``devilryrole``.
Args:
assignment: A :class:`devilry.apps.core.models.Assignment` object.
The ``assignment`` should be the assignment where the examiners belongs.
examiners: An iterable of :class:`devilry.apps.core.models.Examiner` objects.
devilryrole: See
:meth:`devilry.apps.core.models.Assignment.examiners_must_be_anonymized_for_devilryrole`.
"""
return {
'assignment': assignment,
'examiners': examiners,
'devilryrole': devilryrole,
} | 4afa278f115a2a99ee2f922ef15dd8507293d3cc | 9,327 |
import json
def get_news_blacklist() -> list:
"""Get the users news blacklist from news-blacklist.json.
Returns:
list: List of blacklisted news article titles
"""
try:
with open("news-blacklist.json", encoding="utf-8") as file:
log.info("Getting news blacklist from news-blacklist.json")
user_blacklist = json.load(file)
except FileNotFoundError:
log.warning("No news-blacklist.json found, creating a new one")
user_blacklist = {"blacklist": []}
with open("news-blacklist.json", "w", encoding="utf-8") as file:
json.dump(user_blacklist, file)
return user_blacklist["blacklist"] | b25f2c619e5767d8238e95277e691264eb0682df | 9,329 |
def calc_triangular_number(n: int):
"""
A triangular number or triangle number counts objects
arranged in an equilateral triangle.
More info: https://www.mathsisfun.com/algebra/triangular-numbers.html
:param n:
:return:
"""
return int((n * (n + 1)) / 2) | e3bfefd6e0e9451849cee8f6da252ec128285c85 | 9,330 |
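# Quick check against the first few triangular numbers.
assert [calc_triangular_number(n) for n in range(1, 6)] == [1, 3, 6, 10, 15]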
def get_headers(cred=None, filename=None):
"""Return headers for basic HTTP authentication.
Returns:
str: Basic authorization header, including Base64 encoded
username and password.
"""
return {
"Authorization": "Basic {}".format(
get_base64(cred=cred, filename=filename, api="reporting")
)
} | 17a8c941044487a334070d70d9d93071898a31f5 | 9,332 |