content (stringlengths 35 to 762k) | sha1 (stringlengths 40) | id (int64, 0 to 3.66M) |
---|---|---|
import argparse
from pathlib import Path
def _parse_args() -> argparse.Namespace:
"""Registers the script's arguments on an argument parser."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--source-root',
type=Path,
required=True,
help='Prefix to strip from the source files')
parser.add_argument('sources',
type=Path,
nargs='*',
help='Files to mirror to the directory')
parser.add_argument('--directory',
type=Path,
required=True,
help='Directory to which to mirror the sources')
parser.add_argument('--path-file',
type=Path,
help='File with paths to files to mirror')
return parser.parse_args() | cda37a6282b95fca4db51e91bfe98cc44f46fd07 | 4,600 |
def qlist(q):
"""Convenience function that converts asyncio.Queues into lists.
This is inefficient and should not be used in real code.
"""
l = []
# get the messages out
while not q.empty():
l.append(q.get_nowait())
# now put the messages back (since we popped them out)
    for i in l:
        q.put_nowait(i)
return l | 0ce6fb0d543646fb036c35c800d75bbadf670b0d | 4,601 |
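A minimal usage sketch of qlist (assuming the corrected function above); put_nowait/get_nowait work without a running event loop, so this runs as a plain script:
import asyncio

q = asyncio.Queue()
for n in (1, 2, 3):
    q.put_nowait(n)
# The queue contents are returned as a list and left in place.
assert qlist(q) == [1, 2, 3]
assert qlist(q) == [1, 2, 3]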
def is_stdin(name):
"""Tell whether or not the given name represents stdin."""
return name in STDINS | 535ce3fee9e4a9a42ef24e4b35f84420a61cc529 | 4,602 |
def filter_marker_y_padding(markers_y_indexes, padding_y_top, padding_y_bottom):
"""
Filter the markers indexes for padding space in the top and bottom of answer sheet
:param markers_y_indexes:
:param padding_y_top:
:param padding_y_bottom:
:return:
"""
return markers_y_indexes[(markers_y_indexes > padding_y_top)
& (markers_y_indexes < padding_y_bottom)] | b1eed0ac24bd6a6354072427be4375ad188572a5 | 4,603 |
import pandas as pd
import os
def budget_italy(path):
"""Budget Shares for Italian Households
a cross-section from 1973 to 1992
*number of observations* : 1729
*observation* : households
*country* : Italy
A dataframe containing :
wfood
food share
whouse
housing and fuels share
wmisc
miscellaneous share
pfood
food price
phouse
housing and fuels price
pmisc
miscellaneous price
totexp
total expenditure
year
year
income
income
size
household size
pct
cellule weight
Bollino, Carlo Andrea, Frederico Perali and Nicola Rossi (2000) “Linear
household technologies”, *Journal of Applied Econometrics*, **15(3)**,
253–274.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `budget_italy.csv`.
Returns:
Tuple of np.ndarray `x_train` with 1729 rows and 11 columns and
dictionary `metadata` of column headers (feature names).
"""
path = os.path.expanduser(path)
filename = 'budget_italy.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/Ecdat/BudgetItaly.csv'
maybe_download_and_extract(path, url,
save_file_name='budget_italy.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata | dac6aa79f04bda47395a927c0aba950bc53ae33f | 4,604 |
def hr_admin(request):
""" Views for HR2 Admin page """
template = 'hr2Module/hradmin.html'
# searched employee
query = request.GET.get('search')
if(request.method == "GET"):
if(query != None):
emp = ExtraInfo.objects.filter(
Q(user__first_name__icontains=query) |
Q(user__last_name__icontains=query)
).distinct()
emp = emp.filter(user_type="faculty")
else:
emp = ExtraInfo.objects.all()
emp = emp.filter(user_type="faculty")
else:
emp = ExtraInfo.objects.all()
emp = emp.filter(user_type="faculty")
context = {'emps': emp}
return render(request, template, context) | b78f78c57282b60b527bbaa03eab9064d881aea1 | 4,605 |
import boto3
def create_aws_clients(region='us-east-1'):
    """Creates EC2, S3, IAM, and Redshift clients to interact with AWS.
Parameters
----------
region : str
The aws region to create each client (default 'us-east-1').
Returns
-------
    ec2
A boto3 ec2 resource.
s3
A boto3 s3 resource.
iam
A boto3 iam client.
redshift
A boto3 redshift client.
"""
ec2 = boto3.resource(
'ec2',
region_name=region,
aws_access_key_id=KEY,
aws_secret_access_key=SECRET
)
s3 = boto3.resource(
's3',
region_name=region,
aws_access_key_id=KEY,
aws_secret_access_key=SECRET
)
iam = boto3.client(
'iam',
region_name=region,
aws_access_key_id=KEY,
aws_secret_access_key=SECRET
)
redshift = boto3.client(
'redshift',
region_name=region,
aws_access_key_id=KEY,
aws_secret_access_key=SECRET
)
return ec2, s3, iam, redshift | 3a422ac88791e404d67127bc85bab12b6a8aa4d9 | 4,606 |
def apply_function(f, *args, **kwargs):
""" Apply a function or staticmethod/classmethod to the given arguments.
"""
if callable(f):
return f(*args, **kwargs)
elif len(args) and hasattr(f, '__get__'):
# support staticmethod/classmethod
return f.__get__(None, args[0])(*args, **kwargs)
else:
assert False, "expected a function or staticmethod/classmethod" | 374be0283a234d4121435dbd3fa873640f2b9ad1 | 4,607 |
def join_data(ycom_county, census, land_area_data):
"""
Getting one dataframe from the three datasets
"""
census['LogPopDensity'] = np.log10(census['TotalPop']/land_area_data['LND110200D'])
data = pd.concat(([ycom_county, census]), axis=1)
return data | 171c08d0c5dac721c3100df9be747c90b299a6c1 | 4,608 |
def _createController(config):
"""
Creates the appropriate (hypervisor) controller based on the
given configuration.
This is the place where to perform particular initialization tasks for
the particular hypervisor controller implementations.
@param config: an instance of L{ConfigParser}
"""
hv = config.get('hypervisor', 'name')
hvMod = None
logger.debug("Hypervisor specified in config: '%s'" % hv)
fqHvName = "%s.%s" % (CONTROLLERS_PATH, hv)
try:
        hvPkg = __import__(fqHvName, globals=globals(), level=0)
hvMod = getattr(hvPkg, hv)
    except ImportError as e:
msg = "Hypervisor '%s' is not supported. Error: %s" % (hv, e)
logger.fatal(msg)
raise exceptions.ConfigError(msg)
logger.info("Using %s as the HyperVisor" % hvMod.__name__)
return hvMod | eaaa80aed58e72de91e8d93288aec65544b39b45 | 4,609 |
def path_graph():
"""Return a path graph of length three."""
G = nx.path_graph(3, create_using=nx.DiGraph)
G.graph["name"] = "path"
nx.freeze(G)
return G | c5fd4ea322b512bd26755d94581d56ddfb4d52bf | 4,610 |
def dropStudentsWithEvents(df, events,
saveDroppedAs=None,
studentId='BookletNumber',
eventId='Label',
verbose=True):
"""
Drop students with certain events.
It finds students with the events, and use dropStudents() to drop them.
:param df: input data frame with data from multiple students
:param events: a list of events. Each event is a string of event name
:param saveDroppedAs: optionally saving the dropped data to a csv or pickle file. Remember to specify .csv or .pickle
    :param studentId: name of the column containing the student ID info; default to "BookletNumber"
:param eventId: name of the column containing the event name; default to "Label"
:param verbose: default to True
:return: a data frame with students having any of these events dropped.
"""
# error checks
assert (isinstance(df, pd.DataFrame))
for v in [studentId, eventId]:
assert (v in df.columns)
studentsToDrop = df.loc[df[eventId].isin(events), studentId].unique()
if verbose:
print("\ndropStudentsWithEvents:")
print(events)
return dropStudents(df, studentsToDrop, saveDroppedAs, studentId, verbose) | 5308ec96c8d5d3c9704f4a42202656bc4126e645 | 4,611 |
import os
def get_html_app_files_dirs(output_file):
"""
    Return a tuple of (parent_dir, dir_name) for a directory named after the
    `output_file` file object's file_base_name (stripped of its extension) plus
    a `_files` suffix. Return empty strings if output is to stdout.
"""
if is_stdout(output_file):
return '', ''
file_name = output_file.name
parent_dir = os.path.dirname(file_name)
dir_name = fileutils.file_base_name(file_name) + '_files'
return parent_dir, dir_name | 36ac7b6cb1d2071c0728dc34fcdbd6d34da8f708 | 4,612 |
def create_slides(user, node, slideshow_data):
""" Generate SlideshowSlides from data """
""" Returns a collection of SlideshowSlide objects """
slides = []
with transaction.atomic():
for slide in slideshow_data:
slide_obj = SlideshowSlide(
contentnode=node,
sort_order=slide.get("sort_order"),
metadata={
"caption": slide.get('caption'),
"descriptive_text": slide.get('descriptive_text'),
"checksum": slide.get('checksum'),
"extension": slide.get('extension')
}
)
slide_obj.save()
slides.append(slide_obj)
return slides | 6fc31c11f0dc24d17fd82eacd366a0026fb95157 | 4,613 |
def is_valid(sequence):
"""
    A string is not valid if the knight moves onto a blank square,
    or if the string contains more than two vowels.
"""
if any(letter == "_" for letter in sequence):
return False
# Check for vowels
# Strings shorter than 3 letters are always ok, as they
# can't contain more than two vowels
if len(sequence) < 3:
return True
# Check longer sequences for number of vowels
vowels="AEIUO"
num_vowels = len([v for v in sequence if v in vowels])
if num_vowels > 2:
return False
# Check for duplicate characters.
# The original question did not say anything about
# repeated characters, but ignoring them would lead to infinite
# sequences, such as AMAMAMA..., where the knight makes the same sequence
# of moves over and over again
if duplicate_characters(sequence):
return False
return True | 0c3a72d05155eaf69ffeb7a734e9ceeabe0c44c2 | 4,614 |
def batch_dl1_to_dl2(
dict_paths,
config_file,
jobid_from_training,
batch_config,
logs,
):
"""
Function to batch the dl1_to_dl2 stage once the lstchain train_pipe batched jobs have finished.
Parameters
----------
dict_paths : dict
Core dictionary with {stage: PATHS} information
config_file : str
Path to a configuration file. If none is given, a standard configuration is applied
jobid_from_training : str
string containing the jobid from the jobs batched in the train_pipe stage, to be passed to the
dl1_to_dl2 function (as a slurm dependency)
batch_config : dict
Dictionary containing the (full) source_environment and the slurm_account strings to be passed to
dl1_dl2 function
logs: dict
Dictionary with logs files
Returns
-------
jobid_for_dl2_to_dl3 : str
string containing the jobids to be passed to the next stage of the workflow (as a slurm dependency)
"""
log_dl1_to_dl2 = {}
jobid_for_dl2_to_dl3 = []
debug_log = {}
log.info("==== START {} ==== \n".format("batch dl1_to_dl2_workflow"))
for paths in dict_paths:
job_logs, jobid = dl1_to_dl2(
paths["input"],
paths["output"],
path_models=paths["path_model"],
config_file=config_file,
wait_jobid_train_pipe=jobid_from_training,
batch_configuration=batch_config,
slurm_options=paths.get("slurm_options", None),
)
log_dl1_to_dl2.update(job_logs)
# Single particle dl1_dl2 jobid to be appended
jobid_for_dl2_to_dl3.append(jobid)
debug_log[jobid] = f"dl1_to_dl2 jobid that depends on : {jobid_from_training} training job"
jobid_for_dl2_to_dl3 = ",".join(jobid_for_dl2_to_dl3)
save_log_to_file(log_dl1_to_dl2, logs["log_file"], workflow_step="dl1_to_dl2")
save_log_to_file(debug_log, logs["debug_file"], workflow_step="dl1_to_dl2")
log.info("==== END {} ====".format("batch dl1_to_dl2_workflow"))
return jobid_for_dl2_to_dl3 | 673a65e2a4fb6e55339117da657734557858cec8 | 4,615 |
import webbrowser
import os
import io
def browse():
"""
A browser for the bibmanager database.
"""
# Content of the text buffer:
bibs = bm.load()
keys = [bib.key for bib in bibs]
compact_text = "\n".join(keys)
expanded_text = "\n\n".join(bib.content for bib in bibs)
# A list object, since I want this to be a global variable
selected_content = [None]
lex_style = style_from_pygments_cls(
pygments.styles.get_style_by_name(cm.get('style')))
custom_style = Style.from_dict({
"status": "reverse",
"status.position": "#aaaa00",
"status.key": "#ffaa00",
"shadow": "bg:#440044",
"not-searching": "#888888",
})
style = merge_styles([lex_style, custom_style])
def get_menubar_text():
return [
("class:status", " ("),
("class:status.key", "enter"),
("class:status", ")select entry ("),
("class:status.key", "e"),
("class:status", ")xpand entry ("),
("class:status.key", "f"),
("class:status", ")ind ("),
("class:status.key", "s"),
("class:status", ")ave ("),
("class:status.key", "h"),
("class:status", ")elp ("),
("class:status.key", "q"),
("class:status", ")uit"),
]
def get_menubar_right_text():
"""Get index of entry under cursor."""
key = get_current_key(text_field.buffer.document, keys)
return f" {keys.index(key) + 1} "
def get_infobar_text():
"""Get author-year-title of entry under cursor."""
key = get_current_key(text_field.buffer.document, keys)
bib = bibs[keys.index(key)]
year = '' if bib.year is None else bib.year
title = 'NO_TITLE' if bib.title is None else bib.title
return f"{bib.get_authors('ushort')}{year}: {title}"
search_buffer = Buffer(
completer=WordCompleter(keys),
complete_while_typing=False,
multiline=False)
search_field = SearchToolbar(
search_buffer=search_buffer,
forward_search_prompt = "Search: ",
backward_search_prompt = "Search backward: ",
ignore_case=False)
text_field = TextArea(
text=compact_text,
lexer=PygmentsLexer(BibTeXLexer),
scrollbar=True,
line_numbers=False,
read_only=True,
search_field=search_field,
input_processors=[HighlightEntryProcessor()],
)
text_field.buffer.name = 'text_area_buffer'
text_field.is_expanded = False
# Shortcut to HighlightEntryProcessor:
for processor in text_field.control.input_processors:
if processor.__class__.__name__ == 'HighlightEntryProcessor':
text_field.bm_processor = processor
# Do not highlight searched text:
sp = text_field.control.default_input_processors[0]
sp._classname = ' '
sp._classname_current = ' '
menu_bar = VSplit([
Window(
FormattedTextControl(get_menubar_text),
style="class:status"),
Window(
FormattedTextControl(get_menubar_right_text),
style="class:status.right",
width=9,
align=WindowAlign.RIGHT),
],
height=1,
)
info_bar = Window(
content=FormattedTextControl(get_infobar_text),
height=D.exact(1),
style="class:status",
)
body = HSplit([
menu_bar,
text_field,
search_field,
info_bar,
])
root_container = FloatContainer(
content=body,
floats=[
Float(
xcursor=True,
ycursor=True,
content=CompletionsMenu(max_height=16, scroll_offset=1),
),
],
)
# Key bindings:
bindings = KeyBindings()
text_focus = Condition(
lambda: get_app().layout.current_window == text_field.window)
dialog_focus = Condition(
lambda: hasattr(get_app().layout.current_window, 'dialog'))
@bindings.add("q", filter=text_focus)
def _quit(event):
event.app.exit()
# Navigation:
@bindings.add("g", filter=text_focus)
def _go_to_first_line(event):
event.current_buffer.cursor_position = 0
@bindings.add("G", filter=text_focus)
def _go_to_last_line(event) -> None:
event.current_buffer.cursor_position = len(event.current_buffer.text)
@bindings.add("d", filter=text_focus)
def _scroll_down(event):
scroll_half_page_down(event)
@bindings.add("u", filter=text_focus)
def _scroll_up(event):
scroll_half_page_up(event)
@bindings.add("n", filter=text_focus)
def _find_next(event):
search_state = event.app.current_search_state
event.current_buffer.apply_search(
search_state, include_current_position=False, count=event.arg)
@bindings.add("N", filter=text_focus)
def _find_previous(event):
search_state = event.app.current_search_state
event.current_buffer.apply_search(
~search_state, include_current_position=False, count=event.arg)
@bindings.add("h", filter=text_focus)
def _show_help(event):
show_message("Shortcuts", help_message)
@bindings.add("f", filter=text_focus)
def _start_search(event):
search.start_search(direction=search.SearchDirection.FORWARD)
@bindings.add("b", filter=text_focus)
def _open_in_browser(event):
key = get_current_key(event.current_buffer.document, keys)
bib = bm.find(key=key, bibs=bibs)
if bib.adsurl is not None:
webbrowser.open(bib.adsurl, new=2)
else:
show_message("Message", f"Entry '{key}' does not have an ADS url.")
@bindings.add("c-c", filter=dialog_focus)
def _close_dialog(event):
get_app().layout.current_window.dialog.future.set_result(None)
@bindings.add("s", filter=text_focus)
def _save_selected_to_file(event):
selected = text_field.bm_processor.selected_entries
if len(selected) == 0:
show_message("Message", "Nothing to save.")
return
async def coroutine():
dialog = TextInputDialog(
title="Save to File",
label_text="\nEnter a file path or leave blank to quit "
"and print to screen:\n(press Control-c to cancel)\n",
completer=PathCompleter(),
)
path = await show_dialog_as_float(dialog)
content = '\n\n'.join(
bibs[keys.index(key)].content for key in selected)
if path == "":
selected_content[0] = content
# The program termination is in TextInputDialog() since I
# need to close this coroutine first.
return
if path is not None:
try:
with open(path, "w") as f:
f.write(content)
except IOError as e:
show_message("Error", str(e))
ensure_future(coroutine())
@bindings.add("enter", filter=text_focus)
def _toggle_selected_entry(event):
"Select/deselect entry pointed by the cursor."
key = get_current_key(event.current_buffer.document, keys)
text_field.bm_processor.toggle_selected_entry(key)
@bindings.add("e", filter=text_focus)
def _expand_collapse_entry(event):
"Expand/collapse current entry."
key, start_end, is_expanded = get_current_key(
event.current_buffer.document, keys,
get_start_end=True, get_expanded=True)
bib = bm.find(key=key, bibs=bibs)
if is_expanded:
event.app.clipboard.set_text(bib.key)
else:
event.app.clipboard.set_text(bib.content + '\n')
text_field.read_only = False
event.current_buffer.cursor_position = start_end[0]
event.current_buffer.delete(count=start_end[1] - start_end[0])
event.current_buffer.paste_clipboard_data(
event.app.clipboard.get_data(), count=event.arg,
paste_mode=PasteMode.VI_BEFORE)
text_field.read_only = True
if is_expanded:
event.current_buffer.cursor_position = start_end[0]
@bindings.add("E", filter=text_focus)
def _expand_collapse_all(event):
"Expand/collapse all entries."
buffer = event.current_buffer
key = get_current_key(buffer.document, keys)
if text_field.is_expanded:
text_field.text = compact_text
else:
text_field.text = expanded_text
buffer.cursor_position = buffer.text.index(key)
text_field.is_expanded = not text_field.is_expanded
@bindings.add("o", filter=text_focus)
def _open_pdf(event):
buffer = event.current_buffer
key = get_current_key(buffer.document, keys)
bib = bm.find(key=key, bibs=bibs)
has_pdf = bib.pdf is not None
has_bibcode = bib.bibcode is not None
is_missing = has_pdf and not os.path.exists(f'{u.BM_PDF()}{bib.pdf}')
if not has_pdf and not has_bibcode:
show_message("Message",
f"BibTeX entry '{key}' does not have a PDF.")
return
if has_pdf and not is_missing:
pm.open(key=key)
#except Exception as e:
# show_message("Message", textwrap.fill(str(e), width=70))
return
if has_pdf and is_missing and not has_bibcode:
show_message("Message",
f"BibTeX entry has a PDF file: {bib.pdf}, but the file "
"could not be found.")
return
# Need to fetch before opening:
async def coroutine():
dialog = MessageDialog(
"PDF file not found",
"Fetch from ADS?\n(might take a few seconds ...)",
asking=True)
fetch = await show_dialog_as_float(dialog)
if fetch:
with io.StringIO() as buf, redirect_stdout(buf):
fetched = pm.fetch(bib.bibcode, replace=True)
fetch_output = buf.getvalue()
if fetched is None:
show_message("PDF fetch failed", fetch_output)
else:
show_message("PDF fetch succeeded.", fetch_output)
pm.open(key=key)
ensure_future(coroutine())
application = Application(
layout=Layout(root_container, focused_element=text_field),
key_bindings=bindings,
enable_page_navigation_bindings=True,
style=style,
full_screen=True,
)
application.run()
if selected_content[0] is not None:
tokens = list(pygments.lex(selected_content[0], lexer=BibTeXLexer()))
print_formatted_text(
PygmentsTokens(tokens),
end="",
style=lex_style,
#output=create_output(sys.stdout),
) | 90f233a9f3a2088067c6e23efedf5a12a3db1b79 | 4,616 |
import sys
def read_lines_from_input(file):
"""
Reads the provided file line by line to provide a list representation of the contained names.
:param file: A text file containing one name per line. If it's None, the input is read from the standard input.
:return: A list of the names contained in the provided text file
"""
if file is None:
file = sys.stdin
return map(lambda l: l.strip(), file.readlines()) | 4a653979ee51afea8e7199772199c1a93dbfecc3 | 4,617 |
import urllib
def is_dataproc_VM():
"""Check if this installation is being executed on a Google Compute Engine dataproc VM"""
try:
dataproc_metadata = urllib.request.urlopen("http://metadata.google.internal/0.1/meta-data/attributes/dataproc-bucket").read()
if dataproc_metadata.decode("UTF-8").startswith("dataproc"):
return True
except:
pass
return False | 21044a482b534ce3625b49080d1c472d587039ad | 4,618 |
def lookup_all(base):
"""Looks up a subclass of a base class from the registry.
Looks up a subclass of a base class with name provided from the
registry. Returns a list of registered subclass if found, None otherwise.
Args:
base: The base class of the subclass to be found.
Returns:
A list of subclass of the name if found, None otherwise.
"""
basename = base.__name__
if basename not in _registries:
return None
registry = _registries[basename]
output = []
for name in registry.keys():
init_args = registry[name][_INIT_ARGS]
if init_args is not None:
output.append(registry[name][_TYPE_TAG](**init_args))
else:
output.append(registry[name][_TYPE_TAG])
return output | de6a8504d0c6cf6f149b597e4d8b41f7b5fc1eff | 4,619 |
def makepyfile(testdir):
"""Fixture for making python files with single function and docstring."""
def make(*args, **kwargs):
func_name = kwargs.pop('func_name', 'f')
# content in args and kwargs is treated as docstring
wrap = partial(_wrap_docstring_in_func, func_name)
args = map(wrap, args)
kwargs = dict(zip(kwargs.keys(), map(wrap, kwargs.values())))
return testdir.makepyfile(*args, **kwargs)
return make | 420733f4ee299514dba4172cfcc93b7429c635ca | 4,620 |
from PIL import Image, ImageDraw, ImageFont
def createTextWatermark(msg, size, loc, fontcolor='white', fontpath='arial.ttf', fontsize=18):
"""Creates a watermark image of the given text.
Puts it at the given location in an RGBA image of the given size.
Location should be a 2-tuple denoting the center location of the text."""
im = Image.new('RGBA', size, (0,0,0,0))
draw = ImageDraw.Draw(im)
font = ImageFont.truetype(fontpath, fontsize)
tw, th = draw.textsize(msg, font=font)
loc = (loc[0] - tw//2, loc[1] - th//2)
draw.text(loc, msg, font=font, fill=fontcolor)
return im | 6a1ae202a92b351f7d7301735dc825e826898522 | 4,621 |
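An illustrative way to composite the returned watermark onto an image (this assumes a Pillow release where ImageDraw.textsize is still available, and that a TrueType font exists at the given path; 'arial.ttf' is only a placeholder):
base = Image.new('RGBA', (400, 200), (30, 30, 30, 255))
mark = createTextWatermark('DRAFT', base.size, (200, 100),
                           fontcolor='white', fontpath='arial.ttf', fontsize=24)
watermarked = Image.alpha_composite(base, mark)
watermarked.save('watermarked.png')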
def get_server_pull_config(config:dict):
"""
takes a config dictionary and returns the variables related to server deployment (pull from intersections).
If there is any error in the configuration, returns a quadruple of -1 with a console output of the exception
"""
try:
server = config["DataTransfer"]["server"]
intersection = config["DataTransfer"]["intersection"]
startHour = config["DataTransfer"]["StartTime_PullFromIntersections"]["hour"]
startMinute = config["DataTransfer"]["StartTime_PullFromIntersections"]["minute"]
return server, intersection, startHour, startMinute
except Exception as e:
print(e)
return -1, -1, -1, -1 | 3a5a882bf91cb65462cdbf4fe202bbbc9d52ae2c | 4,622 |
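A quick sketch of the configuration shape get_server_pull_config expects (the values below are hypothetical):
sample_config = {
    "DataTransfer": {
        "server": "10.0.0.5",
        "intersection": ["intersection-01", "intersection-02"],
        "StartTime_PullFromIntersections": {"hour": 2, "minute": 30},
    }
}
server, intersection, start_hour, start_minute = get_server_pull_config(sample_config)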
def buff_push(item: BufferItem):
"""
Add BufferItem to the buffer and execute if the buffer is full
"""
q.put(item)
make_dependencies(item)
if q.full():
return buff_empty_partial(q.maxsize - 1)
return None | d45c0f67fa21cade7a0c2462e1cd8167f4939e0b | 4,623 |
import os
def installDirectory():
"""
Return the software installation directory, by looking at location of this
method.
"""
#path = os.path.abspath(os.path.join(os.path.realpath(__file__), os.pardir))
path = os.path.abspath(os.path.realpath(__file__))
path = os.path.abspath(os.path.join(path, '../..'))
#path = path.replace("EGG-INFO/scripts/smodels-config", "")
#path = path.replace("installation.py", "")
return path + "/" | d79d57eea1eb38ec56a864246ac2388d7320a0fa | 4,624 |
from rx.core.operators.take import _take
from typing import Callable
def take(count: int) -> Callable[[Observable], Observable]:
"""Returns a specified number of contiguous elements from the start
of an observable sequence.
.. marble::
:alt: take
-----1--2--3--4----|
[ take(2) ]
-----1--2-|
Example:
>>> op = take(5)
Args:
count: The number of elements to return.
Returns:
An operator function that takes an observable source and
returns an observable sequence that contains the specified
number of elements from the start of the input sequence.
"""
return _take(count) | 636cc982c6c8c9b13a2cecb675bb0ca7aadbcd91 | 4,625 |
from typing import List
from typing import Union
def format_fields_for_join(
fields: List[Union[Field, DrivingKeyField]],
table_1_alias: str,
table_2_alias: str,
) -> List[str]:
"""Get formatted list of field names for SQL JOIN condition.
Args:
fields: Fields to be formatted.
table_1_alias: Alias that should be used in the field on the left side of the
equality sign.
        table_2_alias: Alias that should be used in the field on the right side of the
equality sign.
Returns:
Fields list formatted for an SQL JOIN condition.
"""
return [
JOIN_CONDITION_SQL_TEMPLATE.format(
field_name=field.name,
table_1_alias=table_1_alias,
table_2_alias=table_2_alias,
)
for field in fields
] | 691a154f8b984b11ed177a7948fe74398c693b25 | 4,626 |
def get_payment_balance(currency):
"""
Returns available balance for selected currency
This method requires authorization.
"""
result = get_data("/payment/balances", ("currency", currency))
payment_balance = namedtuple("Payment_balance", get_namedtuple(result[0]))
return [payment_balance(**element) for element in result] | 354abbf4e9bc1b22a32e31555106ce68a21e9cd1 | 4,627 |
import torch
from torch import optim
def build_scheduler(optimizer, config):
    """
    Build a learning-rate scheduler for the given optimizer from a config object.
    """
scheduler = None
config = config.__dict__
sch_type = config.pop('type')
if sch_type == 'LambdaLR':
burn_in, steps = config['burn_in'], config['steps']
# Learning rate setup
def burnin_schedule(i):
if i < burn_in:
factor = pow(i / burn_in, 4)
elif i < steps[0]:
factor = 1.0
elif i < steps[1]:
factor = 0.1
else:
factor = 0.01
return factor
scheduler = optim.lr_scheduler.LambdaLR(optimizer, burnin_schedule)
elif sch_type == 'StepLR':
        # Adjust the learning rate at fixed intervals: multiply it by gamma every step_size steps (a step here usually means an epoch).
step_size, gamma = config['step_size'], config['gamma']
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
elif sch_type == 'ReduceLROnPlateau':
        # Adjust the learning rate when a monitored metric stops improving (decreasing or rising). A very practical strategy: e.g. reduce the LR when validation loss stops decreasing, or when validation accuracy stops increasing.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1,
patience=3, verbose=True, threshold=1e-4)
return scheduler | b205b323db322336426f3c13195cb49735d7284d | 4,628 |
def rpca_alm(X, lmbda=None, tol=1e-7, max_iters=1000, verbose=True,
inexact=True):
"""
Augmented Lagrange Multiplier
"""
if lmbda is None:
lmbda = 1.0 / np.sqrt(X.shape[0])
Y = np.sign(X)
norm_two = svd(Y, 1)[1]
norm_inf = np.abs(Y).max() / lmbda
dual_norm = np.max([norm_two, norm_inf])
Y = Y / dual_norm
A = np.zeros(Y.shape)
E = np.zeros(Y.shape)
dnorm = la.norm(X, ord='fro')
tol_primal = 1e-6 * dnorm
total_svd = 0
mu = 0.5 / norm_two
rho = 6
sv = 5
n = Y.shape[0]
    for iter1 in range(max_iters):
primal_converged = False
sv = sv + np.round(n * 0.1)
primal_iter = 0
while not primal_converged:
Eraw = X - A + (1/mu) * Y
Eupdate = np.maximum(
Eraw - lmbda/mu, 0) + np.minimum(Eraw + lmbda / mu, 0)
U, S, V = svd(X - Eupdate + (1 / mu) * Y, sv)
svp = (S > 1/mu).sum()
if svp < sv:
sv = np.min([svp + 1, n])
else:
sv = np.min([svp + round(.05 * n), n])
Aupdate = np.dot(
np.dot(U[:, :svp], np.diag(S[:svp] - 1/mu)), V[:svp, :])
if primal_iter % 10 == 0 and verbose >= 2:
print(la.norm(A - Aupdate, ord='fro'))
if ((la.norm(A - Aupdate, ord='fro') < tol_primal and
la.norm(E - Eupdate, ord='fro') < tol_primal) or
(inexact and primal_iter > 5)):
primal_converged = True
A = Aupdate
E = Eupdate
primal_iter += 1
total_svd += 1
Z = X - A - E
Y = Y + mu * Z
mu *= rho
if la.norm(Z, ord='fro') / dnorm < tol:
if verbose:
print('\nConverged at iteration {}'.format(iter1))
break
if verbose:
_verbose(A, E, X)
return A, E | 8c09f8f4b004b9a00655402e5466636aa9fc4390 | 4,629 |
def dwt_embed(wmImage, hostImage, alpha, beta):
"""Embeds a watermark image into a host image, using the First Level
Discrete Wavelet Transform and Alpha Blending.\n
The formula used for the alpha blending is:
resultLL = alpha * hostLL + beta * watermarkLL
Arguments:
wmImage (NumPy array) -- the image to be embedded
hostImage (NumPy array) -- the image to be watermarked
alpha (float) -- the first embedding strength factor
beta (float) -- the second embedding strength factor
Returns:
NumPy array type -- the watermarked image, in float64 format
"""
# Take the dimensions of the host and watermark images
wmHeight, wmWidth = wmImage.shape[:2]
hostHeight, hostWidth = hostImage.shape[:2]
# Resize the watermark image so that it is the same size as the host image
if wmHeight > hostHeight or wmWidth > hostWidth:
# Scale down the watermark image
wmImage = cv2.resize(wmImage, (hostWidth, hostHeight), interpolation = cv2.INTER_AREA)
elif wmHeight < hostHeight or wmWidth < hostWidth:
# Scale up the watermark image
wmImage = cv2.resize(wmImage, (hostWidth, hostHeight), interpolation = cv2.INTER_LINEAR)
# Take the new dimensions of the watermark image
wmHeight, wmWidth = wmImage.shape[:2]
# Split both images into channels
hostB, hostG, hostR = cv2.split(hostImage)
wmB, wmG, wmR = cv2.split(wmImage)
# Compute the first level bidimensional DWT for each channel of both images
# (LL, (HL, LH, HH))
cAhostB, (cHhostB, cVhostB, cDhostB) = pywt.dwt2(hostB, 'db2')
cAhostG, (cHhostG, cVhostG, cDhostG) = pywt.dwt2(hostG, 'db2')
cAhostR, (cHhostR, cVhostR, cDhostR) = pywt.dwt2(hostR, 'db2')
cAhostHeight, cAhostWidth = cAhostB.shape
cAwmB, (cHwmB, cVwmB, cDwmB) = pywt.dwt2(wmB, 'db2')
cAwmG, (cHwmG, cVwmG, cDwmG) = pywt.dwt2(wmG, 'db2')
cAwmR, (cHwmR, cVwmR, cDwmR) = pywt.dwt2(wmR, 'db2')
cAwmHeight, cAwmWidth = cAwmB.shape
# Generate image matrix for containing all four host coefficients images
coeffsHost = np.zeros((cAhostHeight * 2, cAhostWidth * 2, 3), dtype = 'float64')
# Merge channels for each of A, H, V and D and build the host coefficients image
cAhost = cv2.merge([cAhostB, cAhostG, cAhostR])
coeffsHost[0:cAhostHeight, 0:cAhostWidth] = cAhost
cHhost = cv2.merge([cHhostB, cHhostG, cHhostR])
coeffsHost[0:cAhostHeight, cAhostWidth:cAhostWidth * 2] = cHhost
cVhost = cv2.merge([cVhostB, cVhostG, cVhostR])
coeffsHost[cAhostHeight:cAhostHeight * 2, 0:cAhostWidth] = cVhost
cDhost = cv2.merge([cDhostB, cDhostG, cDhostR])
coeffsHost[cAhostHeight:cAhostHeight * 2, cAhostWidth:cAhostWidth * 2] = cDhost
# Display the host coefficients image
temp = np.uint8(np.rint(coeffsHost))
cv2.imshow('Host DWT', temp)
# Generate image matrix for containing all four watermark coefficients images
coeffsWm = np.zeros((cAwmHeight * 2, cAwmWidth * 2, 3), dtype = 'float64')
# Merge channels for each of A, H, V and D and build the wm coefficients image
cAwm = cv2.merge([cAwmB, cAwmG, cAwmR])
coeffsWm[0:cAwmHeight, 0:cAwmWidth] = cAwm
cHwm = cv2.merge([cHwmB, cHwmG, cHwmR])
coeffsWm[0:cAwmHeight, cAwmWidth:cAwmWidth * 2] = cHwm
cVwm = cv2.merge([cVwmB, cVwmG, cVwmR])
coeffsWm[cAwmHeight:cAwmHeight * 2, 0:cAwmWidth] = cVwm
cDwm = cv2.merge([cDwmB, cDwmG, cDwmR])
coeffsWm[cAwmHeight:cAwmHeight * 2, cAwmWidth:cAwmWidth * 2] = cDwm
# Display the watermark coefficients image
temp = np.uint8(np.rint(coeffsWm))
cv2.imshow('Watermark DWT', temp)
# Apply the Alpha Blending Technique
# wmImageLL = alpha * hostLL + beta * wmLL
cAresult = alpha * cAhost + beta * cAwm
cAresultB, cAresultG, cAresultR = cv2.split(cAresult)
# Compute the channels of the watermarked image by applying the inverse DWT
resultB = pywt.idwt2((cAresultB, (cHhostB, cVhostB, cDhostB)), 'db2')
resultG = pywt.idwt2((cAresultG, (cHhostG, cVhostG, cDhostG)), 'db2')
resultR = pywt.idwt2((cAresultR, (cHhostR, cVhostR, cDhostR)), 'db2')
# Merge the channels and obtain the final watermarked image
resultImage = cv2.merge([resultB, resultG, resultR])
return resultImage | 939e8d14ceb9452dc873f7b2d9472630211c0432 | 4,630 |
def make_file_iterator(filename):
"""Return an iterator over the contents of the given file name."""
# pylint: disable=C0103
with open(filename) as f:
contents = f.read()
return iter(contents.splitlines()) | e7b612465717dafc3155d9df9fd007f7aa9af509 | 4,631 |
def build_summary(resource, children, attribute, summarizer, keep_details=False):
"""
Update the `resource` Resource with a summary of itself and its `children`
Resources and this for the `attribute` key (such as copyrights, etc).
- `attribute` is the name of the attribute ('copyrights', 'holders' etc.)
- `summarizer` is a function that takes a list of texts and returns
summarized texts with counts
"""
# Collect current data
values = getattr(resource, attribute, [])
no_detection_counter = 0
if values:
# keep current data as plain strings
candidate_texts = [entry.get('value') for entry in values]
else:
candidate_texts = []
if resource.is_file:
no_detection_counter += 1
# Collect direct children existing summaries
for child in children:
child_summaries = get_resource_summary(child, key=attribute, as_attribute=keep_details) or []
for child_summary in child_summaries:
count = child_summary['count']
value = child_summary['value']
if value:
candidate_texts.append(Text(value, value, count))
else:
no_detection_counter += count
# summarize proper using the provided function
summarized = summarizer(candidate_texts)
# add back the counter of things without detection
if no_detection_counter:
summarized.update({None: no_detection_counter})
summarized = sorted_counter(summarized)
if TRACE:
logger_debug('COPYRIGHT summarized:', summarized)
set_resource_summary(resource, key=attribute, value=summarized, as_attribute=keep_details)
return summarized | 622a560c257eceae6d82dc93ffc15718fca0152d | 4,632 |
def little_endian_bytes_to_int(little_endian_byte_seq):
"""Converts a pair of bytes into an integer.
The `little_endian_byte_seq` input must be a 2 bytes sequence defined
according to the little-endian notation (i.e. the less significant byte
first).
For instance, if the `little_endian_byte_seq` input is equals to
``(0xbc, 0x02)`` this function returns the decimal value ``700`` (0x02bc in
hexadecimal notation).
:param bytes little_endian_byte_seq: the 2 bytes sequence to be converted.
It must be compatible with the "bytes" type and defined according to the
little-endian notation.
"""
# Check the argument and convert it to "bytes" if necessary.
# Assert "little_endian_byte_seq" items are in range (0, 0xff).
# "TypeError" and "ValueError" are sent by the "bytes" constructor if
# necessary.
# The statement "tuple(little_endian_byte_seq)" implicitely rejects
# integers (and all non-iterable objects) to compensate the fact that the
# bytes constructor doesn't reject them: bytes(2) is valid and returns
# b'\x00\x00'
little_endian_byte_seq = bytes(tuple(little_endian_byte_seq))
# Check that the argument is a sequence of two items
if len(little_endian_byte_seq) != 2:
raise ValueError("A sequence of two bytes is required.")
integer = little_endian_byte_seq[1] * 0x100 + little_endian_byte_seq[0]
return integer | d8d0c6d4ebb70ea541e479b21deb913053886748 | 4,633 |
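Worked check of the docstring example: with little-endian ordering the second byte is the most significant one, so (0xbc, 0x02) is 0x02 * 0x100 + 0xbc = 700.
assert little_endian_bytes_to_int((0xbc, 0x02)) == 700
assert little_endian_bytes_to_int(b"\x00\x01") == 256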
def higher_follower_count(A, B):
""" Compares follower count key between two dictionaries"""
if A['follower_count'] >= B['follower_count']: return "A"
return "B" | d4d182ca5a3c5bff2bc7229802603a82d44a4d67 | 4,634 |
def _element_or_none(germanium, selector, point):
"""
Function to check if the given selector is only a regular
element without offset clicking. If that is the case, then we
enable the double hovering in the mouse actions, to solve a
host of issues with hovering and scrolling, such as elements
appearing on mouse in, or edge not hovering correctly.
:param germanium:
:param selector:
:param point:
:return:
"""
if isinstance(selector, Point):
return None
if point:
return None
return _element(germanium, selector) | b3de13ecefc7b8593d4b61e7caf63eee41d1521a | 4,635 |
def ENDLEMuEpP_TransferMatrix( style, tempInfo, crossSection, productFrame, angularData, EMuEpPData, multiplicity, comment = None ) :
"""This is LLNL I = 1, 3 type data."""
logFile = tempInfo['logFile']
workDir = tempInfo['workDir']
s = versionStr + '\n'
s += "Process: 'Double differential EMuEpP data transfer matrix'\n"
s += commonDataToString( comment, style, tempInfo, crossSection, productFrame, multiplicity = multiplicity )
s += angularToString( angularData, crossSection )
s += EMuEpPDataToString( EMuEpPData )
return( executeCommand( logFile, transferMatrixExecute, s, workDir, tempInfo['workFile'], tempInfo['restart'] ) ) | 224e72f52ad6b143e51a50962d548084a8e7c283 | 4,636 |
def _fit_gaussian(f, grid, image_spot, p0, lower_bound=None, upper_bound=None):
"""Fit a gaussian function to a 3-d or 2-d image.
# TODO add equations and algorithm
Parameters
----------
f : func
A 3-d or 2-d gaussian function with some parameters fixed.
grid : np.ndarray, np.float
Grid data to compute the gaussian function for different voxel within
a volume V or surface S. In nanometer, with shape (3, V_z * V_y * V_x),
or (2, S_y * S_x).
image_spot : np.ndarray, np.uint
A 3-d or 2-d image with detected spot and shape (z, y, x) or (y, x).
p0 : List
List of parameters to estimate.
lower_bound : List
List of lower bound values for the different parameters.
upper_bound : List
List of upper bound values for the different parameters.
Returns
-------
popt : np.ndarray
Fitted parameters.
pcov : np.ndarray
Estimated covariance of 'popt'.
"""
# TODO check that we do not fit a 2-d gaussian function to a 3-d image or
# the opposite
# compute lower bound and upper bound
if lower_bound is None:
lower_bound = [-np.inf for _ in p0]
if upper_bound is None:
upper_bound = [np.inf for _ in p0]
bounds = (lower_bound, upper_bound)
# Apply non-linear least squares to fit a gaussian function to a 3-d image
y = np.reshape(image_spot, (image_spot.size,)).astype(np.float32)
popt, pcov = curve_fit(f=f, xdata=grid, ydata=y, p0=p0, bounds=bounds)
return popt, pcov | 6fa75997af8dfee3cf90bdcab7919c6eeea0578e | 4,637 |
def createfourierdesignmatrix_chromatic(toas, freqs, nmodes=30, Tspan=None,
logf=False, fmin=None, fmax=None,
idx=4):
"""
Construct Scattering-variation fourier design matrix.
:param toas: vector of time series in seconds
:param freqs: radio frequencies of observations [MHz]
:param nmodes: number of fourier coefficients to use
:param freq: option to output frequencies
:param Tspan: option to some other Tspan
:param logf: use log frequency spacing
:param fmin: lower sampling frequency
:param fmax: upper sampling frequency
:param idx: Index of chromatic effects
:return: F: Chromatic-variation fourier design matrix
:return: f: Sampling frequencies
"""
# get base fourier design matrix and frequencies
F, Ffreqs = utils.createfourierdesignmatrix_red(
toas, nmodes=nmodes, Tspan=Tspan, logf=logf,
fmin=fmin, fmax=fmax)
# compute the DM-variation vectors
Dm = (1400/freqs) ** idx
return F * Dm[:, None], Ffreqs | 59420ea9bde77f965f4571bdec5112d026c63478 | 4,638 |
def get_word_data(char_data):
"""
获取分词的结果
:param char_data:
:return:
"""
seq_data = [''.join(l) for l in char_data]
word_data = []
# stop_words = [line.strip() for line in open(stop_word_file, 'r', encoding='utf-8')]
for seq in seq_data:
seq_cut = jieba.cut(seq, cut_all=False)
word_data.append([w for w in seq_cut ])
return word_data | 8ca306d0f3f4c94f6d67cdc7b865ddef4f639291 | 4,639 |
import os
def make_non_absolute(path):
"""
Make a path non-absolute (so it can be joined to a base directory)
@param path: The file path
"""
drive, path = os.path.splitdrive(path)
index = 0
while os.path.isabs(path[index:]):
index = index + 1
return path[index:] | 51eefa84423077273931a3a4c77b4e53669a6599 | 4,640 |
from typing import List
from typing import Dict
from typing import Any
def get_output_stream(items: List[Dict[str, Any]]) -> List[OutputObject]:
"""Convert a list of items in an output stream into a list of output
objects. The element in list items are expected to be in default
serialization format for output objects.
    Parameters
----------
items: list(dict)
Items in the output stream in default serialization format
Returns
-------
list(vizier.viztrail.module.OutputObject)
"""
result = list()
for item in items:
result.append(
OutputObject(
type=item[KEY_OUTPUT_TYPE],
value=item[KEY_OUTPUT_VALUE]
)
)
return result | 841bffba3f0e4aeab19ca31b62807a5a30e818f1 | 4,641 |
def lvnf_stats(**kwargs):
"""Create a new module."""
return RUNTIME.components[LVNFStatsWorker.__module__].add_module(**kwargs) | 1bdf94687101b8ab90684b67227acec35205e320 | 4,642 |
import re
def parse_float(string):
"""
Finds the first float in a string without casting it.
:param string:
:return:
"""
matches = re.findall(r'(\d+\.\d+)', string)
if matches:
return matches[0]
else:
return None | 4adea9226d0f67cd4d2dfe6a2b65bfd24f3a7ecb | 4,643 |
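Two illustrative calls to parse_float; note the match is returned as a string, not cast to float:
assert parse_float("took 3.50 seconds") == "3.50"
assert parse_float("no decimals here") is None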
def objectproxy_realaddress(obj):
"""
Obtain a real address as an integer from an objectproxy.
"""
voidp = QROOT.TPython.ObjectProxy_AsVoidPtr(obj)
return C.addressof(C.c_char.from_buffer(voidp)) | 6c2f1a2b0893ef2fd90315a2cd3a7c5c5524707f | 4,644 |
def CollateRevisionHistory(builds, repo):
"""Sorts builds and revisions in repository order.
Args:
builds: a dict of the form:
```
builds := {
master: {
builder: [Build, ...],
...,
},
...
}
```
repo (GitWrapper): repository in which the revision occurs.
Returns:
A 2-tuple of (build_history, revisions), where:
```
build_history := {
master: {
builder: [Build, ...],
...,
},
...
}
```
and
```
revisions := [revision, ...]
```
"""
build_history = {}
revisions = set()
    for master, master_data in builds.items():
LOGGER.debug('Collating master %s', master)
master_history = build_history.setdefault(master, {})
        for builder, builder_data in master_data.items():
LOGGER.debug('Collating builder %s', builder)
for build in builder_data:
revisions.add(str(build.revision))
master_history[builder] = repo.sort(
builder_data, keyfunc=lambda b: b.revision)
revisions = repo.sort(revisions)
return (build_history, revisions) | e092d5c77c3767dbcf02ba7e19ba5c923bd9aad7 | 4,645 |
def delta_shear(observed_gal, psf_deconvolve, psf_reconvolve, delta_g1, delta_g2):
"""
Takes in an observed galaxy object, two PSFs for metacal (deconvolving
and re-convolving), and the amount by which to shift g1 and g2, and returns
a tuple of tuples of modified galaxy objects.
((g1plus, g1minus), (g2plus, g2minus))
"""
# Deconvolving by psf_deconvolve
inv_psf = galsim.Deconvolve(psf_deconvolve)
deconvolved = galsim.Convolve(observed_gal, inv_psf)
# Applying second shear in g1
sheared_plus_g1 = deconvolved.shear(g1=delta_g1, g2=0)
sheared_minus_g1 = deconvolved.shear(g1=-delta_g1, g2=0)
# Applying second shear in g2
sheared_plus_g2 = deconvolved.shear(g1=0, g2=delta_g2)
sheared_minus_g2 = deconvolved.shear(g1=0, g2=-delta_g2)
# Reconvolving by psf_reconvolve for g1
reconvolved_plus_g1 = galsim.Convolve(sheared_plus_g1, psf_reconvolve)
reconvolved_minus_g1 = galsim.Convolve(sheared_minus_g1, psf_reconvolve)
g1_plus_minus = (reconvolved_plus_g1, reconvolved_minus_g1)
# Reconvolving by psf_reconvolve for g2
reconvolved_plus_g2 = galsim.Convolve(sheared_plus_g2, psf_reconvolve)
reconvolved_minus_g2 = galsim.Convolve(sheared_minus_g2, psf_reconvolve)
g2_plus_minus = (reconvolved_plus_g2, reconvolved_minus_g2)
# g1_plus_minus = (sheared_plus_g1, sheared_minus_g1)
# g2_plus_minus = (sheared_plus_g2, sheared_minus_g2)
# adding noshear reconvolved for testing
reconvolved_noshear = galsim.Convolve(deconvolved, psf_reconvolve)
return g1_plus_minus, g2_plus_minus, reconvolved_noshear | 13ab29088a1a88305e9f74ab1b43351f2d19b3c6 | 4,646 |
def estimateModifiedPiSquared(n):
"""
Estimates that value of Pi^2 through a formula involving partial sums.
n is the number of terms to be summed; the larger the more accurate the
estimation of Pi^2 tends to be (but not always).
The modification relative to estimatePiSquared() is that the n terms are
added in reverse order (i.e. the smallest values are added first).
"""
partialSum = 0 # Initializing
# Implementation of the mathematical formula involving summing
for k in range(n, 0, -1): # Order reversed
partialSum += 1 / (k ** 2)
estimate = 6*partialSum
return estimate | 652376bf0964990905bf25b12ad8ab5156975dea | 4,647 |
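A quick numerical check (illustrative): since the series sum of 1/k^2 equals pi^2/6, the estimate approaches pi^2 as n grows.
import math

print(estimateModifiedPiSquared(10))       # ~9.2986
print(estimateModifiedPiSquared(100000))   # ~9.8695
print(math.pi ** 2)                        # 9.8696...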
def pattern_match(template, image, upsampling=16, metric=cv2.TM_CCOEFF_NORMED, error_check=False):
"""
Call an arbitrary pattern matcher using a subpixel approach where the template and image
are upsampled using a third order polynomial.
Parameters
----------
template : ndarray
The input search template used to 'query' the destination
image
image : ndarray
The image or sub-image to be searched
upsampling : int
The multiplier to upsample the template and image.
    metric : object
        The metric to be used to perform the template based matching.
        Options: {cv2.TM_CCORR_NORMED, cv2.TM_CCOEFF_NORMED, cv2.TM_SQDIFF_NORMED}
        In testing the first two options perform significantly better with Apollo data.
error_check : bool
If True, also apply a different matcher and test that the values
are not too divergent. Default, False.
Returns
-------
x : float
The x offset
y : float
The y offset
strength : float
The strength of the correlation in the range [-1, 1].
"""
if upsampling < 1:
raise ValueError
# Fit a 3rd order polynomial to upsample the images
if upsampling != 1:
u_template = zoom(template, upsampling, order=3)
u_image = zoom(image, upsampling, order=3)
else:
u_template = template
u_image = image
result = cv2.matchTemplate(u_image, u_template, method=metric)
_, max_corr, min_loc, max_loc = cv2.minMaxLoc(result)
if metric == cv2.TM_SQDIFF or metric == cv2.TM_SQDIFF_NORMED:
x, y = (min_loc[0], min_loc[1])
else:
x, y = (max_loc[0], max_loc[1])
# Compute the idealized shift (image center)
ideal_y = u_image.shape[0] / 2
ideal_x = u_image.shape[1] / 2
# Compute the shift from template upper left to template center
y += (u_template.shape[0] / 2)
x += (u_template.shape[1] / 2)
x = (x - ideal_x) / upsampling
y = (y - ideal_y) / upsampling
return x, y, max_corr, result | adb98b96d9ca778a909868c0c0851bf52b1f0a1b | 4,648 |
def main(argv=[__name__]):
"""Raspi_x10 command line interface.
"""
try:
try:
devices_file, rules_file, special_days_file = argv[1:]
except ValueError:
raise Usage('Wrong number of arguments')
sched = Schedule()
try:
sched.load_conf(devices_file, 'x10_devices', 'devices')
sched.load_conf(rules_file, 'x10_rules', 'rules')
sched.load_conf(special_days_file, 'special_days', 'special_days')
except IOError:
raise Usage
except KeyError as err:
raise Usage('KeyError: {0}'.format(err))
sched.build()
sched.write()
return 0
except Usage as err:
log.error('{0.msg}\n{0.usage}'.format(err))
return 2 | 583df25dc3fb3059d6ed5b87d61a547fc1a11935 | 4,649 |
def HexaMeshIndexCoord2VoxelValue(nodes, elements, dim, elementValues):
"""
Convert hexamesh (bricks) in index coordinates to volume in voxels with value of voxels assigned according to elementValues.
dim: dimension of volume in x, y and z in voxels (tuple)
elementValues: len(elements) == len(elementValues)
Example: to retrieve nodes corresponding to element 217:
nodesSortedUnique[elements[217],:]
Given the default voxelSize and origin, coordinates range from (-0.5 to dimXYZ+0.5)
nodesSortedUnique.shape = (nodes,3)
"""
    volume = np.zeros(dim, dtype=elementValues.dtype)  # initialize volume with zeros (same dtype as elementValues)
xyz = nodes[elements,:][:,0,:] + 0.5 # voxel coordinates of bone
xyz = xyz.astype(int)
volume[tuple(xyz.T)] = elementValues
return volume | 8dcab059dd137173e780b7dd9941c80c89d7929c | 4,650 |
def hamiltonian(latt: Lattice, eps: (float, np.ndarray) = 0.,
t: (float, np.ndarray) = 1.0,
dense: bool = True) -> (csr_matrix, np.ndarray):
"""Computes the Hamiltonian-matrix of a tight-binding model.
Parameters
----------
latt : Lattice
The lattice the tight-binding model is defined on.
eps : array_like, optional
The on-site energies of the model.
t : array_like, optional
The hopping energies of the model.
dense : bool, optional
If ``True`` the hamiltonian matrix is returned as a ``np.ndarray``
Returns
-------
ham : csr_matrix or np.ndarray
The Hamiltonian-matrix as a sparse or dense matrix.
"""
dmap = latt.data.map()
data = np.zeros(dmap.size)
data[dmap.onsite()] = eps
data[dmap.hopping()] = t
ham = csr_matrix((data, dmap.indices))
if dense:
ham = ham.toarray()
return ham | 63df0f8557ba13fe3501506974c402faca1811f5 | 4,651 |
def pad_in(string: str, space: int) -> str:
"""
>>> pad_in('abc', 0)
'abc'
>>> pad_in('abc', 2)
' abc'
"""
return "".join([" "] * space) + string | 325c0751da34982e33e8fae580af6f439a2dcac0 | 4,652 |
def get_notifies(request):
"""页面展示全部通知"""
user = request.siteuser
if not user:
return HttpResponseRedirect(reverse('siteuser_login'))
notifies = Notify.objects.filter(user=user).select_related('sender').order_by('-notify_at')
    # TODO: pagination
ctx = get_notify_context(request)
ctx['notifies'] = notifies
return render_to_response(
notify_template,
ctx,
context_instance=RequestContext(request)
) | 8042b42f03a7ce48b7355a7ba51f02937c00d9d0 | 4,653 |
def get_existing_rule(text):
"""
Return the matched rule if the text is an existing rule matched exactly,
False otherwise.
"""
matches = get_license_matches(query_string=text)
if len(matches) == 1:
match = matches[0]
if match.matcher == MATCH_HASH:
return match.rule | 9c41241532977b0a30485c7b7609da3c6e75b59c | 4,654 |
import time
def confirm_channel(bitcoind, n1, n2):
"""
Confirm that a channel is open between two nodes
"""
assert n1.id() in [p.pub_key for p in n2.list_peers()]
assert n2.id() in [p.pub_key for p in n1.list_peers()]
for i in range(10):
time.sleep(0.5)
if n1.check_channel(n2) and n2.check_channel(n1):
return True
addr = bitcoind.rpc.getnewaddress("", "bech32")
bhash = bitcoind.rpc.generatetoaddress(1, addr)[0]
n1.block_sync(bhash)
n2.block_sync(bhash)
# Last ditch attempt
return n1.check_channel(n2) and n2.check_channel(n1) | bcbf895b286b446f7bb0ad2d7890a0fa902cdbd1 | 4,655 |
def has_permissions(**perms):
"""A :func:`check` that is added that checks if the member has any of
the permissions necessary.
The permissions passed in must be exactly like the properties shown under
:class:`discord.Permissions`.
Parameters
------------
perms
An argument list of permissions to check for.
Example
---------
.. code-block:: python
@bot.command()
@commands.has_permissions(manage_messages=True)
async def test():
await bot.say('You can manage messages.')
"""
def predicate(ctx):
msg = ctx.message
ch = msg.channel
permissions = ch.permissions_for(msg.author)
return all(getattr(permissions, perm, None) == value for perm, value in perms.items())
return check(predicate) | bf9432f136db8cd2643fe7d64807194c0479d3cd | 4,656 |
def extend_params(params, more_params):
"""Extends dictionary with new values.
Args:
params: A dictionary
more_params: A dictionary
Returns:
A dictionary which combines keys from both dictionaries.
Raises:
ValueError: if dicts have the same key.
"""
for yak in more_params:
if yak in params:
raise ValueError('Key "%s" is already in dict' % yak)
params.update(more_params)
return params | 626db0ae8d8a249b8c0b1721b7a2e0f1d4c084b8 | 4,657 |
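Illustrative usage of extend_params: disjoint keys merge in place, a duplicate key raises.
params = extend_params({"lr": 0.1}, {"momentum": 0.9})
assert params == {"lr": 0.1, "momentum": 0.9}
# extend_params({"lr": 0.1}, {"lr": 0.2})  # raises ValueError: Key "lr" is already in dict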
import logging
def __compute_libdeps(node):
"""
Computes the direct library dependencies for a given SCons library node.
the attribute that it uses is populated by the Libdeps.py script
"""
if getattr(node.attributes, 'libdeps_exploring', False):
raise DependencyCycleError(node)
env = node.get_env()
deps = set()
node.attributes.libdeps_exploring = True
try:
try:
for child in env.Flatten(getattr(node.attributes, 'libdeps_direct',
[])):
if not child:
continue
deps.add(child)
except DependencyCycleError as e:
if len(e.cycle_nodes) == 1 or e.cycle_nodes[0] != e.cycle_nodes[
-1]:
e.cycle_nodes.insert(0, node)
logging.error("Found a dependency cycle" + str(e.cycle_nodes))
finally:
node.attributes.libdeps_exploring = False
return deps | 93e44b55bb187ae6123e22845bd4da69b260b107 | 4,658 |
def _AccumulatorResultToDict(partition, feature, grads, hessians):
"""Converts the inputs to a dictionary since the ordering changes."""
return {(partition[i], feature[i, 0], feature[i, 1]): (grads[i], hessians[i])
for i in range(len(partition))} | 20cc895cf936749a35c42a1158c9ea6645019e7d | 4,659 |
def scale_rotor_pots(rotors, scale_factor=((), None)):
""" scale the pots
"""
# Count numbers
numtors = 0
for rotor in rotors:
numtors += len(rotor)
# Calculate the scaling factors
scale_indcs, factor = scale_factor
nscale = numtors - len(scale_indcs)
if nscale > 0:
sfactor = factor**(2.0/nscale)
ioprinter.debug_message(
'scale_coeff test:', factor, nscale, sfactor)
for rotor in rotors:
for tidx, torsion in enumerate(rotor):
if tidx not in scale_indcs and factor is not None:
torsion.pot = automol.pot.scale(torsion.pot, sfactor)
# following is being used in a test to see how effective
# a scaling of fixed scan torsional pots can be
torsion.pot = automol.pot.relax_scale(torsion.pot)
return rotors | f89a04a86029debdef79d2d39ad3fb005d9a28a0 | 4,660 |
async def create(payload: ProductIn):
"""Create new product from sent data."""
product_id = await db.add_product(payload)
apm.capture_message(param_message={'message': 'Product with %s id created.', 'params': product_id})
return ProductOut(**payload.dict(), product_id=product_id) | 77f9ef1699cba57aa8e0cfd5a09550f6d03b8f72 | 4,661 |
def get_glove_info(glove_file_name):
"""Return the number of vectors and dimensions in a file in GloVe format."""
with smart_open(glove_file_name) as f:
num_lines = sum(1 for line in f)
with smart_open(glove_file_name) as f:
num_dims = len(f.readline().split()) - 1
return num_lines, num_dims | 4fde6a034197e51e3901b22c46d946330e2e213e | 4,662 |
from typing import Dict
from typing import List
def retrieve_database_inputs(db_session: Session) -> (
Dict[str, List[RevenueRate]], Dict[str, MergeAddress], List[Driver]):
"""
Retrieve the static inputs of the model from the database
:param db_session: SQLAlchemy Database connection session
:return: level of service mapped to List of RevenueRate objects, merge addresses mapped to MergeAddress objects,
List of driver objects
"""
revenue_table = load_revenue_table_from_db(db_session)
merge_details = load_merge_details_from_db(db_session)
drivers_table = load_drivers_from_db(db_session)
return revenue_table, merge_details, drivers_table | f5242680576d7e07b87fb8fd31e26efc1b0c30f0 | 4,663 |
def _evolve_cx(base_pauli, qctrl, qtrgt):
"""Update P -> CX.P.CX"""
base_pauli._x[:, qtrgt] ^= base_pauli._x[:, qctrl]
base_pauli._z[:, qctrl] ^= base_pauli._z[:, qtrgt]
return base_pauli | 5d0529bc4bfe74a122c24069eccb20fa2b69f153 | 4,664 |
def tp_pixel_num_cal(im, gt):
""" im is the prediction result;
gt is the ground truth labelled by biologists;"""
tp = np.logical_and(im, gt)
tp_pixel_num = tp.sum()
return tp_pixel_num | 197c1f64df3430cfbb6f45413b83360a1b9c44bf | 4,665 |
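A toy example with binary masks (illustrative arrays, assuming numpy imported as np as in the function body):
import numpy as np

pred = np.array([[1, 1, 0],
                 [0, 1, 0]])
truth = np.array([[1, 0, 0],
                  [0, 1, 1]])
print(tp_pixel_num_cal(pred, truth))  # 2 overlapping foreground pixels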
import time
def xsg_data(year=None, month=None,
retry_count=3, pause=0.001):
"""
    Fetch data on restricted shares coming off lockup.
    Parameters
    --------
    year: year, defaults to the current year
    month: unlock month, defaults to the current month
    retry_count : int, default 3
        number of times to retry when network or similar problems occur
    pause : int, default 0.001
        seconds to pause between repeated requests, to avoid problems caused
        by requests being sent too close together
    Return
    ------
    DataFrame
    code: stock code
    name: stock name
    date: unlock date
    count: number of shares unlocked (in units of 10,000 shares)
    ratio: proportion of total outstanding shares
"""
year = du.get_year() if year is None else year
month = du.get_month() if month is None else month
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.XSG_URL%(ct.P_TYPE['http'], ct.DOMAINS['em'],
ct.PAGES['emxsg'], year, month))
lines = urlopen(request, timeout = 10).read()
lines = lines.decode('utf-8') if ct.PY3 else lines
except Exception as e:
print(e)
else:
da = lines[3:len(lines)-3]
list = []
for row in da.split('","'):
list.append([data for data in row.split(',')])
df = pd.DataFrame(list)
df = df[[1, 3, 4, 5, 6]]
for col in [5, 6]:
df[col] = df[col].astype(float)
df[5] = df[5]/10000
df[6] = df[6]*100
df[5] = df[5].map(ct.FORMAT)
df[6] = df[6].map(ct.FORMAT)
df.columns = rv.XSG_COLS
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG) | 0ca7070a63ec9ee58bb590b82d9bcdb8e4801d33 | 4,666 |
def crm_ybquery_v2():
"""
    CRM: query the subId by the user's mobile phone number
:return:
"""
resp = getJsonResponse()
try:
jsonStr = request.data
        # Invoke the business logic
resp = {"message":"","status":200,"timestamp":1534844188679,"body":{"password":"21232f297a57a5a743894a0e4a801fc3","username":"admin"},"result":{"id":"4291d7da9005377ec9aec4a71ea837f","name":"Ronald Thompson","username":"admin","password":"","avatar":"https://gw.alipayobjects.com/zos/rmsportal/jZUIxmJycoymBprLOUbT.png","status":1,"telephone":"","lastLoginIp":"127.0.0.1","lastLoginTime":1534837621348,"creatorId":"admin","createTime":1497160610259,"deleted":0,"roleId":"admin","token":"4291d7da9005377ec9aec4a71ea837f"}}
except BaseException as e:
current_app.logger.error("=========异常============")
current_app.logger.error(e)
current_app.logger.error("=========异常============")
resp = getJsonResponse(code="101", msg="系统异常" + str(e))
return jsonify(resp) | 02b7ff4e1f44643537b4549376aa637dcdbf5261 | 4,667 |
from typing import Dict
from typing import List
from typing import Union
from pathlib import Path
from typing import Iterable
from typing import Tuple
import tqdm
import logging
def get_split_file_ids_and_pieces(
data_dfs: Dict[str, pd.DataFrame] = None,
xml_and_csv_paths: Dict[str, List[Union[str, Path]]] = None,
splits: Iterable[float] = (0.8, 0.1, 0.1),
seed: int = None,
) -> Tuple[Iterable[Iterable[int]], Iterable[Piece]]:
"""
Get the file_ids that should go in each split of a split dataset.
Parameters
----------
data_dfs : Dict[str, pd.DataFrame]
If using dataframes, a mapping of 'files', 'measures', 'chords', and 'notes' dfs.
xml_and_csv_paths : Dict[str, List[Union[str, Path]]]
If using the MusicXML ('xmls') and label csvs ('csvs'), a list of paths of the
matching xml and csv files.
splits : Iterable[float]
An Iterable of floats representing the proportion of pieces which will go into each split.
This will be normalized to sum to 1.
seed : int
A numpy random seed, if given.
Returns
-------
split_ids : Iterable[Iterable[int]]
An iterable, the length of `splits` containing the file_ids for each data point in each
split.
pieces : Iterable[Iterable[Piece]]
The loaded Pieces of each split.
"""
assert sum(splits) != 0
splits = np.array(splits) / sum(splits)
if seed is not None:
np.random.seed(seed)
indexes = []
pieces = []
if data_dfs is not None:
for i in tqdm(data_dfs["files"].index):
file_name = (
f"{data_dfs['files'].loc[i].corpus_name}/{data_dfs['files'].loc[i].file_name}"
)
logging.info("Parsing %s (id %s)", file_name, i)
dfs = [data_dfs["chords"], data_dfs["measures"], data_dfs["notes"]]
names = ["chords", "measures", "notes"]
exists = [i in df.index.get_level_values(0) for df in dfs]
if not all(exists):
for exist, name in zip(exists, names):
if not exist:
logging.warning(
"%s_df does not contain %s data (id %s).", name, file_name, i
)
continue
try:
piece = get_score_piece_from_data_frames(
data_dfs["notes"].loc[i], data_dfs["chords"].loc[i], data_dfs["measures"].loc[i]
)
pieces.append(piece)
indexes.append(i)
except Exception:
logging.exception("Error parsing index %s", i)
continue
elif xml_and_csv_paths is not None:
for i, (xml_path, csv_path) in tqdm(
enumerate(zip(xml_and_csv_paths["xmls"], xml_and_csv_paths["csvs"])),
desc="Parsing MusicXML files",
total=len(xml_and_csv_paths["xmls"]),
):
piece = get_score_piece_from_music_xml(xml_path, csv_path)
pieces.append(piece)
indexes.append(i)
# Shuffle the pieces and the df_indexes the same way
shuffled_indexes = np.arange(len(indexes))
np.random.shuffle(shuffled_indexes)
pieces = np.array(pieces)[shuffled_indexes]
indexes = np.array(indexes)[shuffled_indexes]
split_pieces = []
split_indexes = []
prop = 0
for split_prop in splits:
start = int(round(prop * len(pieces)))
prop += split_prop
end = int(round(prop * len(pieces)))
length = end - start
if length == 0:
split_pieces.append([])
split_indexes.append([])
elif length == 1:
split_pieces.append([pieces[start]])
split_indexes.append([indexes[start]])
else:
split_pieces.append(pieces[start:end])
split_indexes.append(indexes[start:end])
return split_indexes, split_pieces | d01768fddcef9428e5dd3a22592dca8dd083fc9c | 4,668 |
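# Usage sketch (not part of the source): split a hypothetical corpus of matching
# MusicXML and label-csv files 80/10/10. The paths are placeholders, and the
# parsing helpers referenced above must be importable.
paths = {
    "xmls": ["corpus/piece1.mxl", "corpus/piece2.mxl", "corpus/piece3.mxl"],
    "csvs": ["corpus/piece1.csv", "corpus/piece2.csv", "corpus/piece3.csv"],
}
split_ids, split_pieces = get_split_file_ids_and_pieces(
    xml_and_csv_paths=paths, splits=(0.8, 0.1, 0.1), seed=0
)
train_ids, valid_ids, test_ids = split_ids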
def calc_full_dist(row, vert, hor, N, site_collection_SM):
"""
Calculates full distance matrix. Called once per row.
    INPUTS:
    :param row:
        integer, index of the row currently being processed
    :param vert:
        integer, number of included rows
:param hor:
integer, number of columns within radius
:param N:
integer, number of points in row
:param site_collection_SM:
site collection object, for ShakeMap data
:returns:
dict, with following keys
grid_indices- indices of points included in distance matrix
distance_matrix- full distance matrix
"""
# gathers indices for full distance matrix for each row
grid_indices = [None]*(vert*(2*hor+1))
n_grid_indices = 0
for k in range(row-vert+1, row+1):
if k == row:
for j in range(0,hor+1):
grid_indices[n_grid_indices] = j + N*k
n_grid_indices += 1
else:
for j in range(0,2*hor+1):
grid_indices[n_grid_indices] = j + N*k
n_grid_indices += 1
del grid_indices[n_grid_indices:]
distance_matrix = np.zeros([np.size(grid_indices), np.size(grid_indices)])
# Create full distance matrix for row
for k in range(0, np.size(grid_indices)):
distance_matrix[k, k:] = geodetic_distance(
site_collection_SM.lons[grid_indices[k ]], site_collection_SM.lats[grid_indices[k]],
site_collection_SM.lons[grid_indices[k:]], site_collection_SM.lats[grid_indices[k:]]).flatten()
distance_matrix = distance_matrix + distance_matrix.T
return {'grid_indices':grid_indices, 'distance_matrix':distance_matrix} | e332b3b51cf4dadb764865f7c75eb361aa0cc100 | 4,669 |
def background_upload_do():
"""Handle the upload of a file."""
form = request.form
# Is the upload using Ajax, or a direct POST by the form?
is_ajax = False
if form.get("__ajax", None) == "true":
is_ajax = True
    print(form.items())
# Target folder for these uploads.
# target = os.sep.join(['app', 'static', 'photo_albums', 'Test', 'Dave'])
script_dir = os.path.dirname(os.path.abspath(__file__))
target = os.sep.join([script_dir, 'static', 'photo_albums', form.items()[0][1], form.items()[1][1]])
for upload in request.files.getlist("file"):
filename = upload.filename.rsplit(os.sep)[0]
if not os.path.exists(target):
print "Creating directory:", target
os.makedirs(target)
destination = os.sep.join([target, filename])
print "Accept incoming file:", filename
print "Save it to:", destination
upload.save(destination)
# if is_ajax:
return ajax_response(True, msg="DONE!")
# else:
# return redirect(url_for('upload')) | 267608fa9c93a75ca260eb742fed9023ec350b65 | 4,670 |
def catch_list(in_dict, in_key, default, len_highest=1):
"""Handle list and list of list dictionary entries from parameter input files.
Casts list entries of input as same type as default_val.
Assign default values if user does not provide a given input parameter.
Args:
in_dict: Dictionary in input parameters, within which in_key is a key.
in_key: Key of parameter to retrieve from in_dict.
default:
Default list to assign at output if in_key does not exist in in_dict.
The type of the list entries in default implicitly defines the type which the parameter
is cast to, if it exists in in_dict.
len_highest: Expected length of topmost list.
Returns:
Input parameter list retrieved from in_dict, or default if not provided.
"""
# TODO: needs to throw an error if input list of lists is longer than len_highest
# TODO: could make a recursive function probably, just hard to define appropriate list lengths at each level
list_of_lists_flag = type(default[0]) == list
try:
inList = in_dict[in_key]
if len(inList) == 0:
raise ValueError
# List of lists
if list_of_lists_flag:
val_list = []
for list_idx in range(len_highest):
# If default value is None, trust user
if default[0][0] is None:
val_list.append(inList[list_idx])
else:
type_default = type(default[0][0])
cast_in_list = [type_default(inVal) for inVal in inList[list_idx]]
val_list.append(cast_in_list)
# Normal list
else:
# If default value is None, trust user
if default[0] is None:
val_list = inList
else:
type_default = type(default[0])
val_list = [type_default(inVal) for inVal in inList]
except:
if list_of_lists_flag:
val_list = []
for list_idx in range(len_highest):
val_list.append(default[0])
else:
val_list = default
return val_list | e001d35a12f3826be78903f03b6e6539c8d07192 | 4,671 |
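# Usage sketch (not part of the source): entries found in the input dict are cast
# to the type of the default values; missing keys fall back to the default.
defaults = {"steps": [10], "bounds": [[0.0, 1.0]]}
in_dict = {"steps": ["25", "50"]}
print(catch_list(in_dict, "steps", defaults["steps"]))                   # [25, 50]
print(catch_list(in_dict, "bounds", defaults["bounds"], len_highest=2))  # [[0.0, 1.0], [0.0, 1.0]]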
import os
import re
def _load_candidate_scorings (spec, input_dir_candidates, predictor):
"""
Load the msms-based scores for the candidates of the specified msms-spectra.
:param spec: string, identifier of the spectra and candidate list. Currently
we use the inchikey of the structure represented by the spectra.
:param input_dir_candidates: string, directory containing the scoring and
fingerprints of the candidates (compare also 'build_candidate_structure').
:param predictor: list of strings, containing the predictors used to train the model.
Currently only 'maccs' and 'maccsCount_f2dcf0b3' are supported.
:return: pandas.DataFrame, {"id1": [...], "score": [...]}
E.g.:
id1,score
"InChI=1S/C10H10N4O2S/c11-8-1-3-9(4-2-8)17...",0.601026809509167
"InChI=1S/C10H10N4O2S/c11-8-2-4-9(5-3-8)17...",0.59559886408
...
        NOTE: 'id1' here refers to the InChI; this can be changed, but we also need to modify
              '_process_single_candidate_list'.
"""
if predictor[0] == "maccs":
fps_fn = "maccs_binary"
elif predictor[0] == "maccsCount_f2dcf0b3":
fps_fn = "maccs_count"
else:
raise ValueError ("Unsupported predictor for candidates: %s" % predictor[0])
l_scoring_files = os.listdir (input_dir_candidates + "/scorings/" + fps_fn + "/")
scores_fn = list (filter (re.compile ("scoring_list.*=%s.csv" % spec).match, l_scoring_files))
assert (len (scores_fn) == 1)
scores_fn = input_dir_candidates + "/scorings/" + fps_fn + "/" + scores_fn[0]
# Return scores in descending order
return DataFrame.from_csv (scores_fn, index_col = None).sort_values ("score", ascending = False) | dfbc53dc0651cf427ca8e5050e4523c8779415d1 | 4,672 |
def load_dict_data(selected_entities=None, path_to_data_folder=None):
"""Loads up data from .pickle file for the selected entities.
Based on the selected entities, loads data from storage,
into memory, if respective files exists.
Args:
selected_entities: A list of string entity names to be loaded.
            Default is to load all available entities.
path_to_data_folder: A string specifying the absolute path to
the data folder that contains the entity dataset files.
By default, uses the built-in entity datasets.
Returns:
A dictionary mapping entity type (key) to all entity values of
that type. Values are dictionary of dictionaries.
{
'genre': {
'comedy': {
'action': {1:1},
'drama': {1:1}
},
'action': {
'thriller': {1:1}
}
}
}
    Always returns a dictionary. If .pickle files of selected entities
are not found, or if no .pickle files are found, returns an empty
dictionary.
"""
return load_entities(
selected_entities=selected_entities, from_pickle=True,
path_to_data_folder=path_to_data_folder
) | 0236d69d6ed6c663c3bba5edabd59ced9755c546 | 4,673 |
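# Usage sketch (not part of the source): load only the 'genre' entity dataset and
# iterate its values. Assumes load_entities() above and the bundled .pickle files
# are available.
entity_data = load_dict_data(selected_entities=["genre"])
for value, related in entity_data.get("genre", {}).items():
    print(value, list(related))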
def cart_del(request, pk):
""" remove an experiment from the analysis cart and return"""
    pk = int(pk)  # make integer for lookup within template
analyze_list = request.session.get('analyze_list', [])
if pk in analyze_list:
analyze_list.remove(pk)
request.session['analyze_list'] = analyze_list
return HttpResponseRedirect(request.META.get('HTTP_REFERER')) | 210a0fd58d9470aa365906420f3769b57815839a | 4,674 |
def get_block_devices(bdms=None):
"""
@type bdms: list
"""
ret = ""
if bdms:
for bdm in bdms:
ret += "{0}\n".format(bdm.get('DeviceName', '-'))
ebs = bdm.get('Ebs')
if ebs:
ret += " Status: {0}\n".format(ebs.get('Status', '-'))
ret += " Snapshot Id: {0}\n".format(ebs.get('SnapshotId', '-'))
ret += " Volume Size: {0}\n".format(ebs.get('VolumeSize', '-'))
ret += " Volume Type: {0}\n".format(ebs.get('VolumeType', '-'))
ret += " Encrypted: {0}\n".format(str(ebs.get('Encrypted', '-')))
ret += " Delete on Termination: {0}\n".format(ebs.get('DeleteOnTermination', '-'))
ret += " Attach Time: {0}\n".format(str(ebs.get('AttachTime', '-')))
return ret.rstrip()
else:
return ret | bd375f988b13d8fe5949ebdc994210136acc3405 | 4,675 |
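# Usage sketch (not part of the source): format the block-device mappings of the
# first instance returned by boto3 (requires AWS credentials and at least one
# instance in the account/region).
import boto3
ec2 = boto3.client("ec2")
reservations = ec2.describe_instances()["Reservations"]
if reservations:
    instance = reservations[0]["Instances"][0]
    print(get_block_devices(instance.get("BlockDeviceMappings")))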
import numpy as np
from scipy import stats  # lazy import
from pandas import DataFrame
from statsmodels.stats.multitest import multipletests
def outlier_test(model_results, method='bonf', alpha=.05, labels=None,
order=False, cutoff=None):
"""
Outlier Tests for RegressionResults instances.
Parameters
----------
model_results : RegressionResults instance
Linear model results
method : str
- `bonferroni` : one-step correction
- `sidak` : one-step correction
- `holm-sidak` :
- `holm` :
- `simes-hochberg` :
- `hommel` :
- `fdr_bh` : Benjamini/Hochberg
- `fdr_by` : Benjamini/Yekutieli
See `statsmodels.stats.multitest.multipletests` for details.
alpha : float
familywise error rate
labels : None or array_like
If `labels` is not None, then it will be used as index to the
returned pandas DataFrame. See also Returns below
order : bool
Whether or not to order the results by the absolute value of the
studentized residuals. If labels are provided they will also be sorted.
cutoff : None or float in [0, 1]
If cutoff is not None, then the return only includes observations with
multiple testing corrected p-values strictly below the cutoff. The
returned array or dataframe can be empty if there are no outlier
candidates at the specified cutoff.
Returns
-------
table : ndarray or DataFrame
Returns either an ndarray or a DataFrame if labels is not None.
Will attempt to get labels from model_results if available. The
columns are the Studentized residuals, the unadjusted p-value,
and the corrected p-value according to method.
Notes
-----
The unadjusted p-value is stats.t.sf(abs(resid), df) where
df = df_resid - 1.
"""
if labels is None:
labels = getattr(model_results.model.data, 'row_labels', None)
infl = getattr(model_results, 'get_influence', None)
if infl is None:
results = maybe_unwrap_results(model_results)
raise AttributeError("model_results object %s does not have a "
"get_influence method." % results.__class__.__name__)
resid = infl().resid_studentized_external
if order:
idx = np.abs(resid).argsort()[::-1]
resid = resid[idx]
if labels is not None:
labels = np.asarray(labels)[idx]
df = model_results.df_resid - 1
unadj_p = stats.t.sf(np.abs(resid), df) * 2
adj_p = multipletests(unadj_p, alpha=alpha, method=method)
data = np.c_[resid, unadj_p, adj_p[1]]
if cutoff is not None:
mask = data[:, -1] < cutoff
data = data[mask]
else:
mask = slice(None)
if labels is not None:
return DataFrame(data,
columns=['student_resid', 'unadj_p', method+"(p)"],
index=np.asarray(labels)[mask])
return data | 39219cf5ad86f91cf6da15ea66dc2d18f0a371af | 4,676 |
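# Usage sketch (not part of the source): run the Bonferroni-corrected outlier test
# on a small OLS fit with one planted outlier. statsmodels' RegressionResults
# provides get_influence(), so the fallback helpers referenced above are not hit.
import numpy as np
import statsmodels.api as sm
rng = np.random.default_rng(0)
x = sm.add_constant(rng.normal(size=50))
y = x @ np.array([1.0, 2.0]) + rng.normal(size=50)
y[0] += 10.0  # plant an outlier
table = outlier_test(sm.OLS(y, x).fit(), method="bonf", alpha=0.05, order=True)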
def move(request, content_type_id, obj_id, rank):
"""View to be used in the django admin for changing a :class:`RankedModel`
object's rank. See :func:`admin_link_move_up` and
:func:`admin_link_move_down` for helper functions to incoroprate in your
admin models.
Upon completion this view sends the caller back to the referring page.
:param content_type_id:
``ContentType`` id of object being moved
:param obj_id:
ID of object being moved
:param rank:
New rank of the object
"""
content_type = ContentType.objects.get_for_id(content_type_id)
obj = get_object_or_404(content_type.model_class(), id=obj_id)
obj.rank = int(rank)
obj.save()
return HttpResponseRedirect(request.META['HTTP_REFERER']) | 0a8e73d83d7d7c575a8ed5abe43524b22d701a38 | 4,677 |
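# Wiring sketch (not part of the source): a hypothetical urls.py entry so the admin
# "move" links can resolve this view; the route and name are illustrative only.
from django.urls import path
urlpatterns = [
    path("move/<int:content_type_id>/<int:obj_id>/<int:rank>/", move, name="rankedmodel-move"),
]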
def test_second_playback_enforcement(mocker, tmp_path):
"""
Given:
- A mockable test
When:
- The mockable test fails on the second playback
Then:
- Ensure that it exists in the failed_playbooks set
- Ensure that it does not exists in the succeeded_playbooks list
"""
class RunIncidentTestMock:
call_count = 0
count_response_mapping = {
1: PB_Status.FAILED, # The first playback run
2: PB_Status.COMPLETED, # The record run
3: PB_Status.FAILED # The second playback run
}
@staticmethod
def run_incident_test(*_):
# First playback run
RunIncidentTestMock.call_count += 1
return RunIncidentTestMock.count_response_mapping[RunIncidentTestMock.call_count]
filtered_tests = ['mocked_playbook']
tests = [generate_test_configuration(playbook_id='mocked_playbook',
integrations=['mocked_integration'])]
integrations_configurations = [generate_integration_configuration('mocked_integration')]
secret_test_conf = generate_secret_conf_json(integrations_configurations)
content_conf_json = generate_content_conf_json(tests=tests)
build_context = get_mocked_build_context(mocker,
tmp_path,
secret_conf_json=secret_test_conf,
content_conf_json=content_conf_json,
filtered_tests_content=filtered_tests)
mocked_demisto_client = DemistoClientMock(integrations=['mocked_integration'])
server_context = generate_mocked_server_context(build_context, mocked_demisto_client, mocker)
mocker.patch('demisto_sdk.commands.test_content.TestContentClasses.TestContext._run_incident_test',
RunIncidentTestMock.run_incident_test)
server_context.execute_tests()
assert 'mocked_playbook (Second Playback)' in build_context.tests_data_keeper.failed_playbooks
assert 'mocked_playbook' not in build_context.tests_data_keeper.succeeded_playbooks | 314cbfb4f659b34adfdafb6b1c1153c8560249b0 | 4,678 |
import re
def decode_textfield_ncr(content):
"""
Decodes the contents for CIF textfield from Numeric Character Reference.
:param content: a string with contents
:return: decoded string
"""
def match2str(m):
return chr(int(m.group(1)))
    return re.sub(r'&#(\d+);', match2str, content)
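# Usage sketch (not part of the source): numeric character references decode back
# to their characters.
print(decode_textfield_ncr("H&#50;O at 25 &#176;C"))  # -> "H2O at 25 °C"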
def reflect_or_create_tables(options):
"""
returns a dict of classes
make 'em if they don't exist
"tables" is {'wfdisc': mapped table class, ...}
"""
tables = {}
# this list should mirror the command line table options
for table in list(mapfns.keys()) + ['lastid']:
# if options.all_tables:
fulltabnm = getattr(options, table, None)
if fulltabnm:
try:
tables[table] = ps.get_tables(session.bind, [fulltabnm])[0]
except NoSuchTableError:
print("{0} doesn't exist. Adding it.".format(fulltabnm))
tables[table] = ps.make_table(fulltabnm, PROTOTYPES[table])
tables[table].__table__.create(session.bind, checkfirst=True)
return tables | 8974f6e6299240c69cf9deffdb3efb7ba9dc771f | 4,680 |
def config_section_data():
"""Produce the default configuration section for app.config,
when called by `resilient-circuits config [-c|-u]`
"""
config_data = u"""[fn_grpc_interface]
interface_dir=<<path to the parent directory of your Protocol Buffer (pb2) files>>
#<<package_name>>=<<communication_type>>, <<secure connection type>>, <<certificate_path or google API token>>
# 'package_name' is a CSV list of length 3, where each possible value is described in the documentation
# Note: to setup, in your interface_dir, create a sub-directory that has
# the same name as your package, and copy the Protocol Buffer pb2 files
# into that directory.
#
# If the package_name was 'helloworld', your app.config would look like:
# [fn_grpc_interface]
# interface_dir=/home/admin/integrations/grpc_interface_files
# helloworld=unary, None, None"""
return config_data | cb26012ff6ad1a2dbccbbcc5ef81c7a91def7906 | 4,681 |
import multiprocessing
import os
def get_workers_count_based_on_cpu_count():
"""
    Returns the number of workers based on the number of available virtual or physical CPUs on this system.
"""
# Python 2.6+
try:
return multiprocessing.cpu_count() * 2 + 1
except (ImportError, NotImplementedError):
pass
try:
res = int(os.sysconf('SC_NPROCESSORS_ONLN'))
if res > 0:
return res * 2 + 1
except (AttributeError, ValueError):
pass
raise Exception('Can not determine number of CPUs on this system') | 0f42840196e596a371a78f16443b4bc22a89c460 | 4,682 |
def color_print(path: str, color = "white", attrs = []) -> None:
"""Prints colorized text on terminal"""
colored_text = colored(
text = read_warfle_text(path),
color = color,
attrs = attrs
)
print(colored_text)
return None | c3f587d929f350c86d166e809c9a63995063cf95 | 4,683 |
def create_cluster_spec(parameters_server: str, workers: str) -> tf.train.ClusterSpec:
"""
Creates a ClusterSpec object representing the cluster.
:param parameters_server: comma-separated list of hostname:port pairs to which the parameter servers are assigned
:param workers: comma-separated list of hostname:port pairs to which the workers are assigned
:return: a ClusterSpec object representing the cluster
"""
# extract the parameter servers and workers from the given strings
ps_hosts = parameters_server.split(",")
worker_hosts = workers.split(",")
# Create a cluster spec from the parameter server and worker hosts
cluster_spec = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts})
return cluster_spec | 2b4555b68821327451c48220e64bc92ecd5f3acc | 4,684 |
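# Usage sketch (not part of the source): hostnames and ports below are placeholders;
# assumes `import tensorflow as tf` is in scope for the function above.
spec = create_cluster_spec(
    parameters_server="ps0.example.com:2222",
    workers="worker0.example.com:2223,worker1.example.com:2224",
)
print(spec.as_dict())  # {'ps': [...], 'worker': [...]}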
def bq_client(context):
"""
Initialize and return BigQueryClient()
"""
return BigQueryClient(
context.resource_config["dataset"],
) | 839a72d82b29e0e57f5973aee418360ef6b3e2fc | 4,685 |
import numpy as np
def longascnode(x, y, z, u, v, w):
"""Compute value of longitude of ascending node, computed as
the angle between x-axis and the vector n = (-hy,hx,0), where hx, hy, are
respectively, the x and y components of specific angular momentum vector, h.
Args:
x (float): x-component of position
y (float): y-component of position
z (float): z-component of position
u (float): x-component of velocity
v (float): y-component of velocity
w (float): z-component of velocity
Returns:
float: longitude of ascending node
"""
res = np.arctan2(y*w-z*v, x*w-z*u) # remember atan2 is atan2(y/x)
if res >= 0.0:
return res
else:
return res+2.0*np.pi | d108847fa6835bc5e3ff70eb9673f6650ddf795a | 4,686 |
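# Worked example (not part of the source): for r = (1, 0, 0) and v = (0, 0.7, 0.7)
# the angular momentum tilts about +x, so the ascending node lies on the +x axis
# and the longitude of ascending node is 0 degrees.
import numpy as np
print(np.degrees(longascnode(1.0, 0.0, 0.0, 0.0, 0.7, 0.7)))  # ~0.0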
def volumetric_roi_info(atlas_spec):
"""Returns a list of unique ROIs, their labels and centroids"""
if is_image(atlas_spec) and is_image_3D(atlas_spec):
if atlas_spec.__class__ in nibabel.all_image_classes:
atlas_labels = atlas_spec.get_fdata()
else:
atlas_labels = np.array(atlas_spec)
elif isinstance(atlas_spec, str):
atlas_path, atlas_name = get_atlas_path(atlas_spec)
atlas_labels = nibabel.load(atlas_path).get_fdata()
else:
raise ValueError('Unrecognized atlas specification!'
'Must be a predefined name, or'
'a preloaded image!')
# TODO names for ROIs are not read and used!
uniq_rois, roi_size, num_nodes = roi_info(atlas_labels, freesurfer_annot=False)
centroids = dict()
for roi in uniq_rois:
centroids[roi] = np.median(np.nonzero(atlas_labels==roi), axis=1)
return uniq_rois, centroids, atlas_labels | 427d421e53712ddd34982e32c87d77918ecca716 | 4,687 |
def predictFuture(bo:board, sn:snake, snakes:list, foods:list):
"""
Play forward (futuremoves) turns
Check for enemy moves before calculating route boards
==
bo: boardClass as board
sn: snakeClass as snake
snakes: list[] of snakes
==
    return: rr (the result flag from checkFutureDeath)
    (board routing is updated in place via board.updateCell())
"""
# Update / modify routes
# Enemy kills us -- playing forward X turns
numfuture = CONST.lookAheadPredictFuture
# Update enemy future paths to depth of N = numfuture
bo.getEnemyFuture(snakes, numfuture)
# Check for any possible futures where we die
(rr, avoid) = checkFutureDeath(bo, sn, snakes, numfuture=numfuture)
# Update board to avoid these cells
them = []
for sid in snakes:
enemy = snakes[sid]
if (enemy.getType() == 'enemy'):
enemy_head = enemy.getHead()
them.append(enemy_head)
else:
head = enemy.getHead()
# TODO: Further modify this to capture closer & larger
same = True # include squares where we get there same time
closest = bo.closestDist(head, them, same=same)
# Update squares to avoid because of head on collisions
# Update squares to avoid because of head on deadend(deprecate)
if (len(avoid)):
# If enemy can get to location before us
# If enemy kill path found
# print(avoid)
for path in avoid:
# Set future markov board, where
# * length of steps reflects turn, and
# * routing logic looks at next markov board
for step in path:
# print("PREDICT FUTURE DEATH", step)
for i in range(0, len(path)):
bo.updateCell(path[i], CONST.routeSolid/2, i, replace=True)
bo.updateCell(path[i], CONST.routeSolid/2, i+1, replace=True)
# bo.updateCell(step, CONST.routeSolid/4, len(path)+1, replace=True)
# Update eating for snakes based on path
# Update cells that enemy snakes can get to before us
for sid in snakes:
# Ignore us -- we have better calculation because we know our route
if (snakes[sid].getType() != "us"):
# Uses every possible path based on self.predictEnemyMoves
paths = snakes[sid].getNextSteps()
length = snakes[sid].getLength()
#
threat = CONST.routeSolid/2
for path in paths:
# Todo: only paint squares in each path based on turn
for pt in path:
# Print out hazard for N = enemy length or width of board
for turn in range(0, min(length, bo.width)):
if not closest[pt[0], pt[1]]:
bo.updateCell(pt, threat, turn)
# bo.updateCell(pt, CONST.routeSolid/(turn+1), turn)
# threat = int(threat / 5)
food_in_route = 0
# Check if there is food one square from them ..
try:
food_path = 0
# Check if food is in future moves
for path in paths:
pt = path[0] # truncate to first point only for now
if pt in foods:
food_in_route = 1
# print("DEBUG ENEMY PATH", sid, pt, path, food_path) # lookAheadPredictFuture
except:
pass
# Check if snake ate this turn (don't we already accommodate?)
snakes[sid].setEatingFuture(food_in_route)
# print(sid, food_in_route)
# Performance monitoring
return rr | 78020678691e41f447c25cb2bd807c9db7a04c86 | 4,688 |
def convert_to_distance(primer_df, tm_opt, gc_opt, gc_clamp_opt=2):
"""
Convert tm, gc%, and gc_clamp to an absolute distance
(tm_dist, gc_dist, gc_clamp_dist)
away from optimum range. This makes it so that all features will need
to be minimized.
"""
primer_df['tm_dist'] = get_distance(
primer_df.tm.values, tm_opt, tm_opt)
primer_df['gc_dist'] = get_distance(
primer_df.gc.values, gc_opt['min'], gc_opt['max'])
primer_df['gc_clamp_dist'] = get_distance(
primer_df.gc_clamp.values, gc_clamp_opt, gc_clamp_opt)
# primer_df.drop(['tm', 'gc', 'gc_clamp'], axis=1, inplace=True)
return primer_df | 4d556fd79c2c21877b3cb59712a923d5645b5eba | 4,689 |
import copy
def _tmap_error_detect(tmap: TensorMap) -> TensorMap:
"""Modifies tm so it returns it's mean unless previous tensor from file fails"""
new_tm = copy.deepcopy(tmap)
new_tm.shape = (1,)
new_tm.interpretation = Interpretation.CONTINUOUS
new_tm.channel_map = None
def tff(_: TensorMap, hd5: h5py.File, dependents=None):
return tmap.tensor_from_file(tmap, hd5, dependents).mean()
new_tm.tensor_from_file = tff
return new_tm | 263a16a5cb92e0a9c3d42357280eeb6d15a59773 | 4,690 |
def generate_dataset(config, ahead=1, data_path=None):
"""
Generates the dataset for training, test and validation
:param ahead: number of steps ahead for prediction
:return:
"""
dataset = config['dataset']
datanames = config['datanames']
datasize = config['datasize']
testsize = config['testsize']
vars = config['vars']
lag = config['lag']
btc = {}
# Reads numpy arrays for all sites and keep only selected columns
btcdata = np.load(data_path + 'bitcoin_price_history.npz')
for d in datanames:
btc[d] = btcdata[d]
if vars is not None:
btc[d] = btc[d][:, vars]
if dataset == 0:
return _generate_dataset_one_var(btc[datanames[0]][:, WEIGHTED_PRICE_INDEX].reshape(-1, 1), datasize, testsize, lag=lag, ahead=ahead)
# Just add more options to generate datasets with more than one variable for predicting one value
# or a sequence of values
raise NameError('ERROR: No such dataset type') | 89136efffbbd6e115b1d0b887fe7a3c904405bda | 4,691 |
def search(isamAppliance, name, check_mode=False, force=False):
"""
Search UUID for named Web Service connection
"""
ret_obj = get_all(isamAppliance)
return_obj = isamAppliance.create_return_object()
return_obj["warnings"] = ret_obj["warnings"]
for obj in ret_obj['data']:
if obj['name'] == name:
logger.info("Found Web Service connection {0} id: {1}".format(name, obj['uuid']))
return_obj['data'] = obj['uuid']
return_obj['rc'] = 0
return return_obj | f642e9e62203b490a347c21899d45968f6258eba | 4,692 |
def flask_app(initialize_configuration) -> Flask:
"""
Fixture for making a Flask instance, to be able to access application context manager.
This is not possible with a FlaskClient, and we need the context manager for creating
JWT tokens when is required.
@return: A Flask instance.
"""
flask_application = vcf_handler_api('TESTING')
flask_application.config['TESTING'] = True
flask_application.config['PROPAGATE_EXCEPTIONS'] = False
return flask_application | 265c912833025d13d06c2470443e68110ce4f60f | 4,693 |
import requests
def http_request(method, url_suffix, params=None, data=None, headers=HEADERS, safe=False):
"""
A wrapper for requests lib to send our requests and handle requests and responses better.
:type method: ``str``
:param method: HTTP method for the request.
:type url_suffix: ``str``
:param url_suffix: The suffix of the URL (endpoint)
:type params: ``dict``
:param params: The URL params to be passed.
:type data: ``str``
:param data: The body data of the request.
:type headers: ``dict``
:param headers: Request headers
:type safe: ``bool``
:param safe: If set to true will return None in case of http error
:return: Returns the http request response json
:rtype: ``dict``
"""
headers['Authorization'] = get_token()
url = BASE_URL + url_suffix
try:
res = requests.request(method, url, verify=USE_SSL, params=params, data=data, headers=headers)
# Try to create a new token
if res.status_code == 401:
headers['Authorization'] = get_token(new_token=True)
res = requests.request(method, url, verify=USE_SSL, params=params, data=data, headers=headers)
except requests.exceptions.RequestException:
return_error('Error in connection to the server. Please make sure you entered the URL correctly.')
# Handle error responses gracefully
if res.status_code not in {200, 201, 202}:
        result_msg = None
        try:
            result_msg = res.json()
        except ValueError:
            # Response body is not valid JSON; fall back to the HTTP reason phrase
            pass
        reason = result_msg if result_msg else res.reason
        err_msg = f'Error in API call. code:{res.status_code}; reason: {reason}'
        if safe:
            return None
        return_error(err_msg)
return res.json() | 9fbd5123e4f1a39f5fa10fbc6a8f41db7ed1775b | 4,694 |
import time
import torch
import sys
def train(train_loader, model, criterion, average, optimizer, epoch, opt):
"""one epoch training"""
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
end = time.time()
for i, (images, labels, _) in enumerate(train_loader):
data_time.update(time.time() - end)
# modify labels with their new indexes - we are not using all the labels anymore at the training
for ind, label in enumerate(labels):
labels[ind] = opt.original_index.index(label)
# images = torch.cat([images[0], images[1]], dim=0)
if torch.cuda.is_available():
images = images.cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
#idxs = idxs.cuda(non_blocking=True)
bsz = labels.shape[0]
# warm-up learning rate
warmup_learning_rate(opt, epoch, i, len(train_loader), optimizer)
# compute loss
features, _ = model(images)
bs = features.size(0)
outs, prob = average(features, i)
loss = criterion(outs)
# update metric
losses.update(loss.item(), bsz)
# SGD
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print info
if (i + 1) % opt.print_freq == 0:
print('Train: [{0}][{1}/{2}]\t'
'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
'loss {loss.val:.3f} ({loss.avg:.3f})'.format(
epoch, i + 1, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses))
sys.stdout.flush()
return losses.avg | ad2f4379cf283c1716c9d3befb0ee2f50c28c081 | 4,695 |
def FP(target, prediction):
"""
False positives.
:param target: target value
:param prediction: prediction value
:return:
"""
return ((target == 0).float() * prediction.float().round()).sum() | 9c8b21ecbc4f48b737c92fbaf73ef820fe035218 | 4,696 |
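# Usage sketch (not part of the source): two of the rounded predictions fire on
# negative targets, so the false-positive count is 2.
import torch
target = torch.tensor([0., 0., 1., 0.])
prediction = torch.tensor([0.9, 0.2, 0.8, 0.6])
print(FP(target, prediction))  # tensor(2.)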
import math
def get_angle(A, B, C):
"""
Return the angle at C (in radians) for the triangle formed by A, B, C
a, b, c are lengths
C
/ \
b / \a
/ \
A-------B
c
"""
(col_A, row_A) = A
(col_B, row_B) = B
(col_C, row_C) = C
a = pixel_distance(C, B)
b = pixel_distance(A, C)
c = pixel_distance(A, B)
try:
cos_angle = (math.pow(a, 2) + math.pow(b, 2) - math.pow(c, 2)) / (2 * a * b)
except ZeroDivisionError as e:
log.warning(
"get_angle: A %s, B %s, C %s, a %.3f, b %.3f, c %.3f" % (A, B, C, a, b, c)
)
raise e
# If CA and CB are very long and the angle at C very narrow we can get an
# invalid cos_angle which will cause math.acos() to raise a ValueError exception
if cos_angle > 1:
cos_angle = 1
elif cos_angle < -1:
cos_angle = -1
angle_ACB = math.acos(cos_angle)
# log.info("get_angle: A %s, B %s, C %s, a %.3f, b %.3f, c %.3f, cos_angle %s, angle_ACB %s" %
# (A, B, C, a, b, c, pformat(cos_angle), int(math.degrees(angle_ACB))))
return angle_ACB | 30e1681bf2c065c4094b2dd909322158a9968c3c | 4,697 |
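# Worked example (not part of the source): a 3-4-5 right triangle in pixel
# coordinates gives a 90-degree angle at C. Assumes pixel_distance() above is the
# Euclidean distance between two (col, row) points.
A, B, C = (0, 0), (4, 3), (4, 0)
print(math.degrees(get_angle(A, B, C)))  # ~90.0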
def single_labels(interesting_class_id):
"""
:param interesting_class_id: integer in range [0,2] to specify class
:return: number of labels for the "interesting_class"
"""
def s_l(y_true, y_pred):
class_id_true = K.argmax(y_true, axis=-1)
accuracy_mask = K.cast(K.equal(class_id_true, interesting_class_id), 'int32')
return K.cast(K.maximum(K.sum(accuracy_mask), 1), 'int32')
return s_l | d137bbd4bba4bcb19e9bc296e4cecdbd7d8effe6 | 4,698 |
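# Usage sketch (not part of the source): count how many class-1 samples appear in a
# batch of one-hot labels. Assumes the K used inside single_labels above is the
# Keras backend (e.g. `from tensorflow.keras import backend as K` at module level).
import numpy as np
from tensorflow.keras import backend as K
y_true = K.constant(np.eye(3)[[0, 1, 1, 2]])  # one-hot labels for classes 0, 1, 1, 2
y_pred = K.constant(np.eye(3)[[1, 1, 0, 2]])  # predictions (unused by this metric)
print(K.eval(single_labels(1)(y_true, y_pred)))  # 2 samples of class 1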
def get_iam_policy(client=None, **kwargs):
"""
service_account='string'
"""
service_account=kwargs.pop('service_account')
resp = client.projects().serviceAccounts().getIamPolicy(
resource=service_account).execute()
# TODO(supertom): err handling, check if 'bindings' is correct
if 'bindings' in resp:
return resp['bindings']
else:
return None | b777a317e9637a410b78847d05a6725f7600c04f | 4,699 |