content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
def load_empty_config_setup() -> DictConfig:
"""Return a dictionary containing all the MLOQ setup config values set to None."""
return OmegaConf.load(setup_yml.src)
|
e862fb753737990a23975659f7c026ca0a2e7132
| 27,280 |
from typing import Dict
from typing import Tuple
def apply_dfg(dfg: Dict[Tuple[str, str], int], start_activities: Dict[str, int], end_activities: Dict[str, int], activities: Dict[str, int], parameters=None, variant=DEFAULT_VARIANT_DFG) -> Tuple[PetriNet, Marking, Marking]:
"""
Apply the chosen IM algorithm to a DFG, obtaining a Petri net along with an initial and final marking.
Parameters
-----------
dfg
Directly-Follows graph
variant
Variant of the algorithm to apply, possible values:
- Variants.IMd
parameters
Parameters of the algorithm, including:
Parameters.ACTIVITY_KEY -> attribute of the log to use as activity name
(default concept:name)
Returns
-----------
net
Petri net
initial_marking
Initial marking
final_marking
Final marking
"""
return exec_utils.get_variant(variant).apply_dfg(dfg, start_activities=start_activities, end_activities=end_activities, activities=activities, parameters=parameters)
|
644b871c17bcf983067754588be54aecf20c5c40
| 27,281 |
def make_extrap_log_func(func, extrap_x_l=None):
"""
Generate a version of func that extrapolates to infinitely many gridpoints.
Note that extrapolation here is done on the *log* of the function result,
so this will fail if any returned values are < 0. It does seem to be better
behaved for SFS calculation.
func: A function whose last argument is the number of Numerics.default_grid
points to use in calculation and that returns a single scalar or
array.
extrap_x_l: An explicit list of x values to use for extrapolation. If not
provided, the extrapolation routine will look for '.extrap_x'
attributes on the results of func. The method Spectrum.from_phi will
add an extrap_x attribute to resulting Spectra, equal to the x-value
of the first non-zero grid point. An explicit list is useful if you
want to override this behavior for testing.
Returns a new function whose last argument is a list of numbers of grid
points and that returns a result extrapolated to infinitely many grid
points.
"""
return make_extrap_func(func, extrap_x_l=extrap_x_l, extrap_log=True)
|
1ddac8d607b18cb3f392ac9a06ccfcc2608617d4
| 27,282 |
def get_logger(name):
"""每次调用,都是一个新的
"""
return LogCollector(name)
|
81cb8ad13bbf54ada444cc624b7fc521b2cb8943
| 27,283 |
from re import T
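# NOTE: T is called below as a gettext-style translation helper; importing T from `re`
# gives the TEMPLATE flag instead, so this import line is likely an artifact of the
# automated import extraction in this dump.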
def ShowString(name, msg):
"""Return a html page listing a file and a 'back' button"""
return """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN">
<html>
<head>
<title>%s</title>
</head>
<body>
<FORM><INPUT TYPE="BUTTON" VALUE="%s" ONCLICK="history.go(-1)"></FORM>
<h3>%s</h3>
<code><pre>%s</pre></code>
</body>
</html>
""" % (
xml_name(name),
T("Back"),
xml_name(name),
escape(msg),
)
|
4f1c837989c18180991dcfb6ecb93025d8a6c27d
| 27,284 |
def game_loop(screen, buttons, items, music, sound, g_settings, particles=None, percentthing=None):
"""Manage events, return a gamestate change if it happens, and update the screen"""
while True:
# Check and manage event queue
gs_change = gf.check_events(buttons, music, sound, g_settings)
# If we are returned a new gamestate from checking events, return the gamestate again to transition
if isinstance(gs_change, gf.GameState):
return gs_change
# Update all aspects on screen
gf.update_screen(screen, buttons, items, g_settings, particles, percentthing)
|
0fcd40402859bca8671a2856a9872bcd7fb6008e
| 27,285 |
import os
import re
import yaml
from requests import sessions
def fake_request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send data
before giving up, as a float, or a (`connect timeout, read timeout
<user/advanced.html#timeouts>`_) tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
:return: :class:`Response <Response>` object
:rtype: requests.Response
Usage::
>>> import requests
>>> req = requests.request('GET', 'http://httpbin.org/get')
<Response [200]>
"""
session = sessions.Session()
response = session.request(method=method, url=url, **kwargs)
# By explicitly closing the session, we avoid leaving sockets open which
# can trigger a ResourceWarning in some cases, and look like a memory leak
# in others.
session.close()
# begin test patch
inject_flag_file='./clientinject.dat'
if os.path.exists(inject_flag_file):
# open, read and inject data
replace_data=True
with open(inject_flag_file) as infile:
fake_response_data = yaml.safe_load(infile)
if fake_response_data['url']: # we want to re.match the given url
replace_data=False # only replace on match
if re.match(fake_response_data['url'], response.url):
replace_data=True
if replace_data:
# replace resp[value] w/ the fake data
print "Fake response data: %s" % fake_response_data
for key, value in fake_response_data['data'].items():
setattr(response, key, value)
print 'Altered response values:'
for key, value in vars(response).items():
print "%s: %s" %(key, value)
# end test patch
return response
|
a2a1cd19fc657d2e0cece37954735d9c66e877c5
| 27,286 |
from typing import List
from typing import Dict
from typing import Any
from collections import OrderedDict
def assert_step_match(
step: Step,
expected_step_func: str,
expected_step_arguments: List[Dict[str, Any]],
step_registry: StepRegistry,
):
"""Assert that the Step correctly matches in the Registry"""
print(
"{} STEP '{}' SHOULD MATCH {}".format(
cf.orange(">>"),
cf.deepSkyBlue3("{} {}".format(step.keyword, step.text)),
cf.deepSkyBlue3(expected_step_func),
),
end=" ",
flush=True,
)
# match the step text from the config with one from the registry
try:
matcher.match_step(step, step_registry)
except StepImplementationNotFoundError:
print_failure(None, ["Expected Step Text didn't match any Step Implementation"])
return False
# check if Step matches the correct Step Implementation Function
matched_step_func = step.step_impl.func
if matched_step_func.__name__ != expected_step_func:
print_failure(
matched_step_func,
[
"Expected Step Text matched {} instead of {}".format(
matched_step_func.__name__, expected_step_func
)
],
)
return False
# check if the Step has a match with the correct arguments
if expected_step_arguments:
# merge the Step's keyword and positional arguments into one dict
args, kwargs = step.step_impl_match.evaluate()
actual_step_arguments = utils.get_func_pos_args_as_kwargs(
matched_step_func, args
)
actual_step_arguments.update(kwargs)
# turn the list of single-item-dicts to a multi-item dict
# -> [{1: 2}, {3: 4}] --> {1: 2, 3: 4}
# NOTE(TF) for Python 3.5 test reproducibility we need an OrderedDict -.^
expected_step_arguments = OrderedDict(
(
argpair
for argpairs in expected_step_arguments
for argpair in argpairs.items()
)
)
errors = assert_step_arguments(actual_step_arguments, expected_step_arguments)
if errors:
print_failure(matched_step_func, errors)
return False
print(cf.bold_forestGreen("✔"))
return True
|
0b156e6f7a1bf39b6fcc7805f0dcb9da30768e58
| 27,287 |
from typing import List
from typing import Dict
import requests
def get_grafana_dashboards_url(admin: bool) -> List[Dict]:
"""
Get a list of dashboards available to the tenant.
:admin (bool): Whether the requesting user has admin status.
Return a list of dashboard dictionaries.
"""
urls = []
req = format_grafana_admin_request('/api/search?query=[Grafonnet]')
results = requests.get(req).json()
for item in results:
folder_title = item.get('folderTitle')
if admin or not folder_title or folder_title != 'admin':
item['url'] = format_grafana_frontend_request(item['url'])
urls.append(item)
return urls
|
0b28b9ef1333c633a001297f9c038fd5496952a8
| 27,288 |
def validate_comma_separated_list(argument):
"""Convert argument to a list."""
if not isinstance(argument, list):
argument = [argument]
last = argument.pop()
items = [i.strip(u' \t\n') for i in last.split(u',') if i.strip(u' \t\n')]
argument.extend(items)
return argument
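# Illustrative usage (not part of the original snippet): the trailing element
# is expanded on commas and appended back onto the list.
print(validate_comma_separated_list("alpha, beta , gamma"))  # ['alpha', 'beta', 'gamma']
print(validate_comma_separated_list(["already-split", "delta,epsilon"]))  # ['already-split', 'delta', 'epsilon']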
|
bdf68db95d6070be4ffb5a74a646f5c730c726b4
| 27,289 |
def get_view(brain):
"""Setup for view persistence test"""
fig = brain._figures[0][0]
if mlab.options.backend == 'test':
return
fig.scene.camera.parallel_scale = 50
assert fig.scene.camera.parallel_scale == 50
view, roll = brain.show_view()
return fig.scene.camera.parallel_scale, view, roll
|
72295348921668e309aed5ac7c281dae7dea292a
| 27,290 |
def model_description(formula):
"""Interpret model formula and obtain a model description.
This function receives a string with a formula describing a statistical
model and returns an object of class ModelTerms that describes the
model interpreted from the formula.
Parameters
----------
formula: string
A string with a model description in formula language.
Returns
----------
An object of class ModelTerms with an internal description of the model.
"""
return Resolver(Parser(Scanner(formula).scan()).parse()).resolve()
|
fad391e86b31108694c3a784101ad8686f8b3292
| 27,291 |
def eintragen_kaeufe(kliste, id_zu_objekt, id_zu_profil):
""" bekommt eine Liste von dicts mit dem Inhalt von je einer Zeile der
registration-Tabelle der alten db. Außerdem ein mapping der produkt_id
der alten db zu model-Instanzen der neuen. Trägt entsprechende Käufe ein
und gibt dict produkt_id -> model-Instanz zurück
kompliziert ist die Zuordnung von format zu art; es gibt in der alten
db folgende formate der Käufe abhängig vom type vom produkt:
scholie: PDF, Kindle, ePub, Druck
antiquariat: Druck
programm: ''
seminar: '', vorOrt
salon: '', vorOrt, vor Ort, Stream
media-salon: '', Stream
media-vortrag: ''
media-vorlesung: '' """
def reg_zu_kauf(kauf):
""" nimmt eine Zeile der kliste und gibt objekt und art für den
zu erstellenden Kauf aus """
objekt, type_alt = id_zu_objekt[kauf['event_id']]
if type_alt in ['programm', 'seminar']:
art = 'teilnahme'
elif type_alt == 'antiquariat':
art = 'kaufen'
elif type_alt in ['scholie', 'buch']:
art = {'Druck': 'druck',
'PDF': 'pdf',
'': 'spam',
'ePub': 'epub',
'Kindle': 'mobi'}[kauf['format']]
if art=='spam':
if kauf['quantity']==1:
art = 'pdf'
else:
art = 'druck'
elif type_alt[:5] == 'media':
art = 'aufzeichnung'
elif type_alt == 'salon':
art = 'aufzeichnung' # wrong, but done in the customer's favour
return objekt, art
with transaction.atomic():
for kauf in kliste:
if kauf['reg_datetime'][0] == '0':
datum = '1111-11-11 11:11:11'
else:
datum = kauf['reg_datetime']
if kauf['reg_notes']:
kommentar = "Alte reg_id %s, notes %s" % (
kauf['reg_id'], kauf['reg_notes'])
else:
kommentar = "Aus alter DB mit reg_id %s" % kauf['reg_id']
objekt, art = reg_zu_kauf(kauf)
kunde = id_zu_profil[kauf['user_id']]
menge = kauf['quantity']
neu = Kauf.neuen_anlegen(objekt, art, menge, kunde, kommentar)
neu.zeit = datum
neu.save()
print('Kauf von %s durch %s angelegt' % (objekt, kunde.user))
|
68da4934335fefd64ffbfb9200afa81976037332
| 27,293 |
from datetime import datetime, timezone
def update_status(payload: Something, context: EventContext) -> Something:
"""
Updates status of payload to PROCESSED and puts previous status in history.
:param payload: Something, object
:param context: EventContext
"""
logger.info(context, "updating something status", extra=extra(something_id=payload.id))
if payload.status:
payload.history.append(payload.status)
payload.status = Status(
ts=datetime.now(timezone.utc),
type=StatusType.PROCESSED
)
return payload
|
7dbcec5930e657dfc3e654c4d1c8c970e9947906
| 27,294 |
def get_holdout_set(train, target_column):
"""This is a sample callable to demonstrate how the Environment's `holdout_dataset` is evaluated. If you do provide a
callable, it should expect two inputs: the train_dataset (pandas.DataFrame), and the target_column name (string). You should
return two DataFrames: a modified train_dataset, and a holdout_dataset. What happens in between is up to you, perhaps split
apart a portion of the rows, but the idea is to remove part of train_dataset, and turn it into holdout_dataset. For this
example, we'll just copy train_dataset, which is a VERY BAD IDEA in practice. Don't actually do this"""
return train, train.copy()
|
ba2ea647c287f11f37bc4557ef389ed288b0bb02
| 27,295 |
def field_type(value):
"""Return the type of the field, using the Ref object"""
if isinstance(value, Ref):
return "RefValue"
else:
return "StringValue"
|
f3165d87ecef0f13214e98856a10851061aea4f6
| 27,296 |
import pytest
import torch
def test_read_covars_manual_input(tmp_observe_class,
covar_details_mapped_covar_mapped_names_tmp_observe_class,
additional_text, monkeypatch):
"""
test reading of covars from manual input by user. Monkeypatches reliance on function 'input'
"""
covariates = [1.1, 2.2, 200, -1.7]
# temp class to execute the test
cls = tmp_observe_class
# add attribute 'initial_guess' required for '_read_covars_manual'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
covar_tensor = torch.tensor([covariates], dtype=torch.double, device=device)
cls.initial_guess = covar_tensor
# add proposed_X attributed required for '_read_covars_manual'
cls.proposed_X = covar_tensor
# add attributes defining the covariate expectation
cls.covar_details = covar_details_mapped_covar_mapped_names_tmp_observe_class[0]
cls.covar_mapped_names = covar_details_mapped_covar_mapped_names_tmp_observe_class[1]
cls.sorted_pandas_columns = covar_details_mapped_covar_mapped_names_tmp_observe_class[2]
# monkeypatch
def mock_input(x): # mock function to replace 'input' for unit testing purposes
return ", ".join([str(x) for x in covariates])
monkeypatch.setattr("builtins.input", mock_input)
# run the test
# different tests for cases where it's supposed to pass vs fail
if isinstance(additional_text, str):
covars_candidate_float_tensor = cls._read_covars_manual_input(additional_text)
print(covars_candidate_float_tensor)
# assert that the right elements are returned in 'covars_candidate_float_tensor'
for i in range(covars_candidate_float_tensor.size()[1]):
assert covars_candidate_float_tensor[0, i].item() == covariates[i]
# cases where the type of additional_text should make the test fail
else:
with pytest.raises(AssertionError) as e:
covars_candidate_float_tensor = cls._read_covars_manual_input(additional_text)
assert str(e.value) == "greattunes._observe._read_covars_manual_input: wrong datatype of parameter 'additional_text'. Was expecting 'str' but received " + str(type(additional_text))
|
46c62f0350edbecd001040b5c431862377b9afe8
| 27,297 |
def remove_overlapping_squares_v2(squares_dict, array_type):
"""
removes squares with min_x and min_y that are both within 40 pixels of each other
:param squares_dict: dict with overlapping squares
:param array_type: "Air_100" is the only one currently supported
:return: dict of squares and dataframe with x and y col/row assignments
"""
df = make_df_with_minx_miny(squares_dict)
new = df.drop_duplicates(subset=['min_x', 'min_y'])
x_values = assign_x_in_same_rows(new.sort_values('min_x'))
y_values = assign_y_in_same_columns(x_values)
squares, df = make_uniform_squares(y_values.sort_values(by=['x_groups', 'y_groups']), array_type=array_type)
return squares, df
|
5f13cda913ece68c0402e5c5aa003bffed50b0cd
| 27,298 |
def depth_to_space(x, scale, use_default=False):
"""Depth to space function."""
if use_default:
out = tf.depth_to_space(x, scale)
else:
b, h, w, c = list(map(int, x.shape))
out = tf.reshape(x, [b, h, w, scale, scale, -1])
out = tf.transpose(out, [0, 1, 3, 2, 4, 5])
out = tf.reshape(out, [b, h * scale, w * scale, -1])
return out
|
1c6f8c55fd9f7371ca7e69d91db44b86ffb81d45
| 27,299 |
def camelcase(key_name):
"""Convert snake-case to camel-case."""
parts = iter(key_name.split('_'))
return next(parts) + ''.join(i.title() for i in parts)
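# Illustrative usage (not part of the original snippet): shows the
# snake_case to camelCase conversion.
assert camelcase("key_name") == "keyName"
assert camelcase("max_sequence_length") == "maxSequenceLength"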
|
078adb6b7b014bf3b0a7dffb00f1ff321951f275
| 27,300 |
def calculate_sigmoid(alpha_value = 1, TDE = 0, sigma=.1):
"""
SOFTMAX VBDE: f(s, a, \sigma) = \frac{2e^{-|\alpha TDE| / \sigma}}{1-e^{-|\alpha TDE|/\sigma}}
:return:
"""
temp = np.exp(-np.abs(alpha_value *TDE + 1e-16) / sigma)
return 2*temp / (1-temp)
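# Worked example (not part of the original snippet): evaluating the expression
# from the docstring for a few TD-errors; requires numpy, which the function
# uses as `np`.
import numpy as np
print(calculate_sigmoid(alpha_value=1, TDE=0.05, sigma=0.1))  # ~3.08
print(calculate_sigmoid(alpha_value=1, TDE=0.5, sigma=0.1))   # ~0.0136
print(calculate_sigmoid(alpha_value=1, TDE=2.0, sigma=0.1))   # ~4.1e-09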
|
d030447197531a850c43d5c86b17aae698c806ff
| 27,301 |
def getprop(obj, string):
"""
Look up a dotted attribute path on an object, e.g. 'position.x'.
:param string: dotted attribute path
:return: the attribute found at the end of the path
"""
tab = string.split('.')
curr_val = obj
for attr in tab:
curr_val = getattr(curr_val, attr)
return curr_val
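# Illustrative usage (not part of the original snippet): walk a dotted
# attribute path on a nested object built with SimpleNamespace.
from types import SimpleNamespace
obj = SimpleNamespace(position=SimpleNamespace(x=1.5, y=2.0))
print(getprop(obj, "position.x"))  # 1.5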
|
c82f869395129a8b69d1a89cde97ce36fe5affd9
| 27,302 |
def extend(vals, inds, shape):
""" Makes an array of shape `shape` where indices `inds` have vales `vals`
"""
z = np.zeros(shape, dtype=vals.dtype)
z[inds] = vals
return z
|
fddcf709779ce139f1645e4292690fed6e0378f6
| 27,304 |
def check_model(model_name, model, model_type):
"""
Check the type of input `model` .
Args:
model_name (str): Name of model.
model (Object): Model object.
model_type (Class): Class of model.
Returns:
Object, if the type of `model` is `model_type`, return `model` itself.
Raises:
TypeError: If `model` is not an instance of `model_type` .
"""
if isinstance(model, model_type):
return model
msg = '{} should be an instance of {}, but got {}'.format(model_name, model_type, type(model).__name__)
LOGGER.error(TAG, msg)
raise TypeError(msg)
|
b6bd8b25cc2ea91e328ec5e17fd88f3f6e676e67
| 27,305 |
def compute_tnacs( hvib ):
"""
Computes the time-averaged nonadiabatic couplings for a given hvib.
hvib ( list of list of matrices ): The vibronic hamiltonian for all timesteps
returns: a matrix of time-averaged nonadiabatic couplings between all electronic states
in meV
"""
nstates = hvib[0].num_of_rows
nacs = data_stat.cmat_stat2( hvib, 1)
#nacs = data_stat.cmat_stat2( hvib, 2)
mb_tnacs = []
for i in range( nstates ):
mb_tnacs.append( [] )
for j in range( nstates ):
mb_tnacs[i].append( nacs.get(i,j).imag * 1000.0 / units.ev2Ha )
return np.array( mb_tnacs )
|
87a30d408a2a9a14fd4987edba66a47b95d4a0fe
| 27,307 |
import async_timeout
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up config entry."""
session = aiohttp_client.async_get_clientsession(hass)
hass.data.setdefault(DOMAIN, {})
printer = SyncThru(
entry.data[CONF_URL], session, connection_mode=ConnectionMode.API
)
async def async_update_data() -> SyncThru:
"""Fetch data from the printer."""
try:
async with async_timeout.timeout(10):
await printer.update()
except SyncThruAPINotSupported as api_error:
# if an exception is thrown, printer does not support syncthru
_LOGGER.info(
"Configured printer at %s does not provide SyncThru JSON API",
printer.url,
exc_info=api_error,
)
raise api_error
else:
# if the printer is offline, we raise an UpdateFailed
if printer.is_unknown_state():
raise UpdateFailed(
f"Configured printer at {printer.url} does not respond."
)
return printer
coordinator: DataUpdateCoordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=DOMAIN,
update_method=async_update_data,
update_interval=timedelta(seconds=30),
)
hass.data[DOMAIN][entry.entry_id] = coordinator
await coordinator.async_config_entry_first_refresh()
if isinstance(coordinator.last_exception, SyncThruAPINotSupported):
# this means that the printer does not support the syncthru JSON API
# and the config should simply be discarded
return False
device_registry = dr.async_get(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
configuration_url=printer.url,
connections=device_connections(printer),
identifiers=device_identifiers(printer),
model=printer.model(),
name=printer.hostname(),
)
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
|
4dbecb1a63b228b689461f764c8e679c3a52f545
| 27,308 |
def center_binned_stats(*args, **kwargs):
"""
Same as scipy.stats.binned_statistic, but additionally returns
the bin centers (matching the length of `statistic`) alongside
the bin edges.
See docs.scipy.org/doc/scipy/reference/generated/scipy.stats.binned_statistic.html
"""
stat, binedges, binnumber = binned_statistic(*args,**kwargs)
bincenters = (binedges[:-1]+binedges[1:])/2.
return stat, bincenters, binedges, binnumber
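# Illustrative usage (not part of the original snippet); `binned_statistic`
# is assumed to come from scipy.stats, as the docstring link suggests.
import numpy as np
from scipy.stats import binned_statistic
x = np.linspace(0, 10, 101)
stat, bincenters, binedges, binnumber = center_binned_stats(x, x ** 2, statistic="mean", bins=5)
print(bincenters)  # [1. 3. 5. 7. 9.]
print(len(stat) == len(bincenters))  # True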
|
adb762d874a19b4af09a49a2ad30b1b33d314be8
| 27,309 |
def create_account():
"""
Handles the user signing up to create an account
Redirects them to the log in page with a message after if successful
"""
signUpForm = forms.RegisterFormFactory()
if signUpForm.validate_on_submit():
netid, name, duke_email, phone_number, affiliation, password = extract_info(signUpForm)
# everything is checked with validators, so we can just register the user
register_user(netid,name,duke_email,phone_number,affiliation,password)
return redirect(url_for('rides.log_in_main'))
return render_template('registerLogInPages/sign-up.html', form=signUpForm)
|
321bcf6c779c6aa8857b9c4d054c18c5f611c5ad
| 27,310 |
import warnings
def tracks(track):
"""
Check if the submitted RGTs are valid
Arguments
---------
track: ICESat-2 reference ground track (RGT)
"""
#-- string length of RGTs in granules
track_length = 4
#-- total number of ICESat-2 satellite RGTs is 1387
all_tracks = [str(tr + 1).zfill(track_length) for tr in range(1387)]
if track is None:
return ["????"]
else:
if isinstance(track, (str,int)):
assert int(track) > 0, "Reference Ground Track must be positive"
track_list = [str(track).zfill(track_length)]
elif isinstance(track, list):
track_list = []
for t in track:
assert int(t) > 0, "Reference Ground Track must be positive"
track_list.append(str(t).zfill(track_length))
else:
raise TypeError(
"Reference Ground Track as a list or string"
)
#-- check if user-entered RGT is outside of the valid range
if not set(all_tracks) & set(track_list):
warnings.filterwarnings("always")
warnings.warn("Listed Reference Ground Track is not available")
return track_list
|
306b213fc040dbaabf515dfeff7efe45db656549
| 27,312 |
def submit_spark_job(project_id, region, cluster_name, job_id_output_path,
main_jar_file_uri=None, main_class=None, args=[], spark_job={}, job={},
wait_interval=30):
"""Submits a Cloud Dataproc job for running Apache Spark applications on YARN.
Args:
project_id (str): Required. The ID of the Google Cloud Platform project
that the cluster belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the
request.
cluster_name (str): Required. The cluster to run the job.
main_jar_file_uri (str): The HCFS URI of the jar file that contains the main class.
main_class (str): The name of the driver's main class. The jar file that
contains the class must be in the default CLASSPATH or specified in
jarFileUris.
args (list): Optional. The arguments to pass to the driver. Do not include
arguments, such as --conf, that can be set as job properties, since a
collision may occur that causes an incorrect job submission.
spark_job (dict): Optional. The full payload of a [SparkJob](
https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob).
job (dict): Optional. The full payload of a [Dataproc job](
https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs).
wait_interval (int): The wait seconds between polling the operation.
Defaults to 30s.
job_id_output_path (str): Path for the ID of the created job
Returns:
The created job payload.
"""
if not spark_job:
spark_job = {}
if not job:
job = {}
if main_jar_file_uri:
spark_job['mainJarFileUri'] = main_jar_file_uri
if main_class:
spark_job['mainClass'] = main_class
if args:
spark_job['args'] = args
job['sparkJob'] = spark_job
return submit_job(project_id, region, cluster_name, job, wait_interval, job_id_output_path=job_id_output_path)
|
86494a8af89a93507d40c334a84ee279cc5f7250
| 27,313 |
def get_filter_ids(id_query):
"""Parses the `id` filter paramter from the url query.
"""
if id_query is None:
return None
filter_ids = id_query.split(',')
for filter_id in filter_ids:
validate_id(filter_id)
return filter_ids
|
08582368e4487c602160af42dbc6ffcd10c97075
| 27,315 |
import random
def random_sampling(predictions, number):
"""
Return the indexes of the next samples that need to be labelled
for our training, using random prioritisation
Args:
predictions : A matrix of probabilities with all the predictions
for the unlabelled data
number : The number of indexes that we need to return
Returns:
The indexes of the samples to label and add to the training set
"""
return random.sample(range(len(predictions)), number)
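# Illustrative usage (not part of the original snippet): pick 3 random row
# indexes out of 10 unlabelled predictions (output order is random).
predictions = [[0.2, 0.8]] * 10
print(random_sampling(predictions, 3))  # e.g. [7, 1, 4]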
|
22a1b13122bdf5c1b95d2b039458d27a62544f6d
| 27,316 |
def natsort_key_icase(s: str) -> str:
"""Split string to numeric and non-numeric fragments."""
return natsort_key(s.lower())
|
ae8bbbec4a7889c2737fbe10e62c69215398faf7
| 27,317 |
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
unique_ids = features["unique_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(start_logits, end_logits) = create_model(
bert_config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
tf.logging.info("*** Features3 ***")
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
predictions = {
"unique_ids": unique_ids,
"start_logits": start_logits,
"end_logits": end_logits,
}
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
return output_spec
return model_fn
|
84a66d5e10032c5e6fffbbf0069e2e674f8ca8f8
| 27,318 |
def datetime_to_gps_format(t):
"""
Converts from a datetime to week number and time of week format.
NOTE: This does NOT convert between utc and gps time. The result
will still be in gps time (so will be off by some number of
leap seconds).
Parameters
----------
t : np.datetime64, pd.Timestamp, datetime.datetime
A datetime object (possibly an array) that is convertable to
datetime64 objects using pd.to_datetime (see the pandas docs
for more details).
Returns
--------
wn_tow : dict
Dictionary with attributes 'wn' and 'tow' corresponding to the
week number and time of week.
See also: tow_to_datetime
"""
t = pd.to_datetime(t)
delta = (t - GPS_WEEK_0)
# compute week number
wn = np.floor(delta.total_seconds() / WEEK_SECS).astype('int64')
# subtract the whole weeks from timedelta and get the remaining seconds
delta -= pd.to_timedelta(wn * WEEK_SECS, 's')
seconds = delta.total_seconds()
return {'wn': wn, 'tow': seconds}
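# Illustrative usage (not part of the original snippet). GPS_WEEK_0 and
# WEEK_SECS are module-level constants stripped from this dump; the values
# below (GPS epoch 1980-01-06 and seconds per week) are assumed reconstructions.
import numpy as np
import pandas as pd
GPS_WEEK_0 = pd.Timestamp("1980-01-06 00:00:00")
WEEK_SECS = 7 * 24 * 3600
out = datetime_to_gps_format("2020-01-01 00:00:00")
print(out["wn"], out["tow"])  # 2086 259200.0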
|
66dfdb09534f3b425f50d0710c68819e067c89a4
| 27,319 |
def view(jinja_environment: Environment, name: str, *args, **kwargs):
"""
Returns a Response object with HTML obtained from synchronous rendering.
Use this when `enable_async` is set to False when calling `use_templates`.
"""
return get_response(
render_template(
jinja_environment.get_template(template_name(name)), *args, **kwargs
)
)
|
864c7106db19cc15cb37132c7755652b500f3ef2
| 27,320 |
from typing import List
import collections
def create_preprocess_fn(
vocab: List[str],
num_oov_buckets: int,
client_batch_size: int,
client_epochs_per_round: int,
max_sequence_length: int,
max_elements_per_client: int,
max_shuffle_buffer_size: int = 10000) -> tff.Computation:
"""Creates a preprocessing functions for Stack Overflow next-word-prediction.
This function returns a `tff.Computation` which takes a dataset and returns a
dataset, suitable for mapping over a set of unprocessed client datasets.
Args:
vocab: Vocabulary which defines the embedding.
num_oov_buckets: The number of out of vocabulary buckets. Tokens that are
not present in the `vocab` are hashed into one of these buckets.
client_batch_size: Integer representing batch size to use on the clients.
client_epochs_per_round: Number of epochs for which to repeat train client
dataset. Must be a positive integer.
max_sequence_length: Integer determining shape of padded batches. Sequences
will be padded up to this length, and sentences longer than this will be
truncated to this length.
max_elements_per_client: Integer controlling the maximum number of elements
to take per client. If -1, keeps all elements for each client. This is
applied before repeating `client_epochs_per_round`, and is intended
primarily to contend with the small set of clients with tens of thousands
of examples.
max_shuffle_buffer_size: Maximum shuffle buffer size.
Returns:
A `tff.Computation` taking as input a `tf.data.Dataset`, and returning a
`tf.data.Dataset` formed by preprocessing according to the input arguments.
"""
if client_batch_size <= 0:
raise ValueError('client_batch_size must be a positive integer. You have '
'passed {}.'.format(client_batch_size))
elif client_epochs_per_round <= 0:
raise ValueError('client_epochs_per_round must be a positive integer. '
'You have passed {}.'.format(client_epochs_per_round))
elif max_sequence_length <= 0:
raise ValueError('max_sequence_length must be a positive integer. You have '
'passed {}.'.format(max_sequence_length))
elif max_elements_per_client == 0 or max_elements_per_client < -1:
raise ValueError(
'max_elements_per_client must be a positive integer or -1. You have '
'passed {}.'.format(max_elements_per_client))
if num_oov_buckets <= 0:
raise ValueError('num_oov_buckets must be a positive integer. You have '
'passed {}.'.format(num_oov_buckets))
if (max_elements_per_client == -1 or
max_elements_per_client > max_shuffle_buffer_size):
shuffle_buffer_size = max_shuffle_buffer_size
else:
shuffle_buffer_size = max_elements_per_client
# Features are intentionally sorted lexicographically by key for consistency
# across datasets.
feature_dtypes = collections.OrderedDict(
creation_date=tf.string,
score=tf.int64,
tags=tf.string,
title=tf.string,
tokens=tf.string,
type=tf.string,
)
@tff.tf_computation(tff.SequenceType(feature_dtypes))
def preprocess_fn(dataset):
to_ids = build_to_ids_fn(
vocab=vocab,
max_sequence_length=max_sequence_length,
num_oov_buckets=num_oov_buckets)
dataset = dataset.take(max_elements_per_client).shuffle(
shuffle_buffer_size).repeat(client_epochs_per_round).map(
to_ids, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return batch_and_split(dataset, max_sequence_length, client_batch_size)
return preprocess_fn
|
1a7d114de3d979da4556123b185a00ca810b3f82
| 27,321 |
def get_element_parts(
original_list: list, splitter_character: str, split_index: int
) -> list:
"""
Split all elements of the passed list on the passed splitter_character.
Return the element at the passed index.
Parameters
----------
original_list : list
List of strings to be split.
splitter_character : str
Character to split the strings on.
split_index : int
Index of the element to be returned.
Returns
-------
list
List with the elements at the passed index.
"""
new_list = []
for element in original_list:
temp_element = element.rsplit(splitter_character)[split_index] # split element
temp_element = temp_element.strip() # clean data
temp_element = temp_element.casefold() # force lower case
new_list.append(temp_element)
return new_list
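# Illustrative usage (not part of the original snippet): keep the part after
# the last '-' of each entry, stripped and lower-cased.
print(get_element_parts(["Foo - Bar", "Baz - QUX "], "-", -1))  # ['bar', 'qux']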
|
8c663fd64ebb1b2c53a64a17f7d63e842b457652
| 27,323 |
def logsumexp_masked(a, mask):
"""Returns row-wise masked log sum exp of a.
Uses the following trick for numeric stability:
log(sum(exp(x))) == log(sum(exp(x - max(x)))) + max(x)
Args:
a: 2D tensor.
mask: 2D tensor.
"""
mask = tf.cast(mask, a.dtype)
a_max = tf.math.reduce_max(a * mask, axis=1, keepdims=True)
a = a - a_max
a_exp = tf.math.exp(a)
a_sum_exp = tf.math.reduce_sum(a_exp * mask, axis=1, keepdims=True)
return tf.squeeze(tf.math.log(a_sum_exp) + a_max)
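# Quick check (not part of the original snippet): on small inputs the stable
# computation above should agree with the direct masked log-sum-exp.
import numpy as np
import tensorflow as tf
a = np.array([[1.0, 2.0, 3.0], [0.5, -1.0, 4.0]])
mask = np.array([[1.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
stable = logsumexp_masked(tf.constant(a), tf.constant(mask)).numpy()
direct = np.log(np.sum(np.exp(a) * mask, axis=1))
print(np.allclose(stable, direct))  # True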
|
86102b153e7ce912678a6381b38cb3a5168de3c8
| 27,325 |
def preprocess2(data: list, max_length: int, test_data: bool):
"""
Convert raw input into a format a deep-learning model can train on.
The default algorithm is char2vec; since the default model is an MLP, every
input is returned as a fixed-size vector. Strings longer than the fixed
length are truncated; shorter ones are zero-padded.
:param data: list of strings ([string1, string2, ...])
:param max_length: maximum string length
:param test_data: if True, split off 3% of the data as a held-out set and return it as well
:return: list of vectors ([[0, 1, 5, 6], [5, 4, 10, 200], ...]) when max_length is 4
"""
query1 =[]
query2 =[]
for d in data:
q1,q2 = d.split('\t')
query1.append(q1)
query2.append(q2.replace('\n',''))
vectorized_data1 = [decompose_str_as_one_hot(datum, warning=False) for datum in query1]
vectorized_data2 = [decompose_str_as_one_hot(datum, warning=False) for datum in query2]
if test_data :
data_size = (len(data))
test_size = int(data_size * 0.03)
train_size = data_size - test_size
zero_padding1 = np.zeros((train_size, max_length), dtype=np.int32)
zero_padding2 = np.zeros((train_size, max_length), dtype=np.int32)
zero_padding1_test = np.zeros((test_size, max_length), dtype=np.int32)
zero_padding2_test = np.zeros((test_size, max_length), dtype=np.int32)
for idx, seq in enumerate(vectorized_data1):
if idx < train_size:
length = len(seq)
if length >= max_length:
length = max_length
zero_padding1[idx, :length] = np.array(seq)[:length]
else:
zero_padding1[idx, :length] = np.array(seq)
else:
length = len(seq)
if length >= max_length:
length = max_length
zero_padding1_test[idx - train_size, :length] = np.array(seq)[:length]
else:
zero_padding1_test[idx - train_size, :length] = np.array(seq)
for idx, seq in enumerate(vectorized_data2):
if idx < train_size:
length = len(seq)
if length >= max_length:
length = max_length
zero_padding2[idx, :length] = np.array(seq)[:length]
else:
zero_padding2[idx, :length] = np.array(seq)
else:
length = len(seq)
if length >= max_length:
length = max_length
zero_padding2_test[idx - train_size, :length] = np.array(seq)[:length]
else:
zero_padding2_test[idx - train_size, :length] = np.array(seq)
return zero_padding1,zero_padding2, zero_padding1_test,zero_padding2_test, train_size
else:
data_size = (len(data))
test_size = int(data_size * 0.03)
train_size = data_size - test_size
zero_padding1 = np.zeros((data_size, max_length), dtype=np.int32)
zero_padding2 = np.zeros((data_size, max_length), dtype=np.int32)
for idx, seq in enumerate(vectorized_data1):
length = len(seq)
if length >= max_length:
length = max_length
zero_padding1[idx, :length] = np.array(seq)[:length]
else:
zero_padding1[idx, :length] = np.array(seq)
for idx, seq in enumerate(vectorized_data2):
length = len(seq)
if length >= max_length:
length = max_length
zero_padding2[idx, :length] = np.array(seq)[:length]
else:
zero_padding2[idx, :length] = np.array(seq)
return zero_padding1, zero_padding2
|
61810a385c935796f1d4f6e57c5f602cb0fa6f33
| 27,327 |
def create_inference_metadata(object_type, boundary_image, boundary_world):
"""
Create metadata for **each** detected object
:param object_type: Type of the object | int
:param boundary_image: Boundary of the object in image coordinates | np.array
:param boundary_world: Boundary of the object in GCS (world coordinates) - shape: 2(x, y) x points | np.array
:return: JSON object describing the detected object ... python dictionary
"""
obj_metadata = {
"obj_type": object_type,
"obj_boundary_image": boundary_image
}
object_boundary = "POLYGON (("
for i in range(boundary_world.shape[1]):
object_boundary = object_boundary + str(boundary_world[0, i]) + " " + str(boundary_world[1, i]) + ", "
object_boundary = object_boundary + str(boundary_world[0, 0]) + " " + str(boundary_world[1, 0]) + "))"
# print("object_boundary: ", object_boundary)
obj_metadata["obj_boundary_world"] = object_boundary # string in wkt
# print("obj_metadata: " ,obj_metadata)
return obj_metadata
|
b13f1ad4abc22f3eaca2c81c56ab9cb0eae80aa9
| 27,328 |
def noauth_filter_factory(global_conf, forged_roles):
"""Create a NoAuth paste deploy filter
:param forged_roles: A space-separated list of roles to forge on requests
"""
forged_roles = forged_roles.split()
def filter(app):
return NoAuthFilter(app, forged_roles)
return filter
|
5a18d581616c6b4ae54b5d9c30095dd8734ed2e7
| 27,329 |
from typing import Tuple
from typing import Dict
def parse_line_protocol_stat_key(key: str) -> Tuple[str, Dict[str, str]]:
"""Parseline protocolish key to stat prefix and key.
Examples:
SNMP_WORKER;hostname=abc.com,worker=snmp-mti
will become:
("SNMP_WORKER", {"hostname": "abc.com", "worker": "snmp-mti"})
"""
try:
prefix, raw_labels = key.split(";", 1)
labels = dict(raw_label.split("=", 1) for raw_label in raw_labels.split(","))
return prefix, labels
except ValueError:
return key, {}
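# Illustrative usage (not part of the original snippet), mirroring the
# docstring example plus a key without any labels.
print(parse_line_protocol_stat_key("SNMP_WORKER;hostname=abc.com,worker=snmp-mti"))
# ('SNMP_WORKER', {'hostname': 'abc.com', 'worker': 'snmp-mti'})
print(parse_line_protocol_stat_key("PLAIN_KEY"))  # ('PLAIN_KEY', {})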
|
a6806f7dd67fb2a4734caca94bff3d974923f4b2
| 27,330 |
def negative_log_partial_likelihood(censor, risk):
"""Return the negative log-partial likelihood of the prediction
y_true contains the survival time
risk is the risk output from the neural network
censor is the vector of inputs that are censored
regularization is the regularization constant (not used currently in model)
Uses the Keras backend to perform calculations
Sorts the surv_time by sorted reverse time
"""
# calculate negative log likelihood from estimated risk
epsilon = 0.001
risk = K.reshape(risk, [-1]) # flatten
hazard_ratio = K.exp(risk)
# cumsum on sorted surv time accounts for concordance
log_risk = K.log(tf.cumsum(hazard_ratio)+epsilon)
log_risk = K.reshape(log_risk, [-1])
uncensored_likelihood = risk - log_risk
# apply censor mask: 1 - dead, 0 - censor
censored_likelihood = uncensored_likelihood * censor
num_observed_events = K.sum(censor)
neg_likelihood = - K.sum(censored_likelihood) / \
tf.cast(num_observed_events, tf.float32)
return neg_likelihood
|
e42a7cf32fd191efb91806ecf51fd7bf279595c6
| 27,331 |
import math
def approx_equal(x, y, tol=1e-12, rel=1e-7):
"""approx_equal(x, y [, tol [, rel]]) => True|False
Return True if numbers x and y are approximately equal, to within some
margin of error, otherwise return False. Numbers which compare equal
will also compare approximately equal.
x is approximately equal to y if the difference between them is less than
an absolute error tol or a relative error rel, whichever is bigger.
If given, both tol and rel must be finite, non-negative numbers. If not
given, default values are tol=1e-12 and rel=1e-7.
>>> approx_equal(1.2589, 1.2587, tol=0.0003, rel=0)
True
>>> approx_equal(1.2589, 1.2587, tol=0.0001, rel=0)
False
Absolute error is defined as abs(x-y); if that is less than or equal to
tol, x and y are considered approximately equal.
Relative error is defined as abs((x-y)/x) or abs((x-y)/y), whichever is
smaller, provided x or y are not zero. If that figure is less than or
equal to rel, x and y are considered approximately equal.
Complex numbers are not directly supported. If you wish to compare to
complex numbers, extract their real and imaginary parts and compare them
individually.
NANs always compare unequal, even with themselves. Infinities compare
approximately equal if they have the same sign (both positive or both
negative). Infinities with different signs compare unequal; so do
comparisons of infinities with finite numbers.
"""
if tol < 0 or rel < 0:
raise ValueError('error tolerances must be non-negative')
# NANs are never equal to anything, approximately or otherwise.
if math.isnan(x) or math.isnan(y):
return False
# Numbers which compare equal also compare approximately equal.
if x == y:
# This includes the case of two infinities with the same sign.
return True
if math.isinf(x) or math.isinf(y):
# This includes the case of two infinities of opposite sign, or
# one infinity and one finite number.
return False
# Two finite numbers.
actual_error = abs(x - y)
allowed_error = max(tol, rel*max(abs(x), abs(y)))
return actual_error <= allowed_error
|
45285d62e6fb3da403f3efd15b1f67df92cd345c
| 27,332 |
import json
def get_latest_enabled_scripts():
"""The ``/scripts/latest/enabled`` endpoint requires authentication.
It is used to get the latest enabled scripts for all services,
submitted by all teams, including the master/organizer scripts
whose team id will be Null.
The JSON response is:
{
"scripts" : [List of {"id" : int,
"type": ("exploit", "benign", "getflag",
"setflag"),
"team_id": int or NULL (NULL if it's our
exploit),
"service_id" : int}]
}
:return: a JSON dictionary that contains all latest working scripts.
"""
cursor = mysql.cursor()
# First, we need to get the latest scripts submitted by each team for each service.
# Union that with all the scripts of administrator i.e get_flag/set_flag
cursor.execute("""SELECT MAX(id) as id, type, team_id, service_id
FROM scripts
WHERE current_state = 'enabled'
AND team_id IS NOT NULL
GROUP BY team_id, service_id, type
UNION
SELECT id, type, team_id, service_id
FROM scripts
WHERE current_state = 'enabled'
AND team_id IS NULL
GROUP BY team_id, service_id, type""")
return json.dumps({"scripts": cursor.fetchall()})
|
4430eaf9c7a0d0a82f5977850e46221e8b5998fe
| 27,334 |
def hanning(shape, dtype=float, device=backend.cpu_device):
"""Create multi-dimensional hanning window.
Args:
shape (tuple of ints): Output shape.
dtype (Dtype): Output data-type.
device (Device): Output device.
Returns:
array: hanning filter.
"""
device = backend.Device(device)
xp = device.xp
shape = _normalize_shape(shape)
with device:
window = xp.ones(shape, dtype=dtype)
for n, i in enumerate(shape[::-1]):
x = xp.arange(i, dtype=dtype)
w = 0.5 - 0.5 * xp.cos(2 * np.pi * x / max(1, (i - (i % 2))))
window *= w.reshape([i] + [1] * n)
return window
|
2192b4e75ceb52560865219553d27acd56bf4ef4
| 27,335 |
from typing import Union
def bitwise_and(x1: Union[ivy.Array, ivy.NativeArray], x2: Union[ivy.Array, ivy.NativeArray]) -> ivy.Array:
"""
Computes the bitwise AND of the underlying binary representation of each element x1_i of the input array x1 with
the respective element x2_i of the input array x2.
:param x1: first input array. Should have an integer or boolean data type.
:param x2: second input array. Must be compatible with x1 (see Broadcasting). Should have an integer or
boolean data type.
:return: an array containing the element-wise results. The returned array must have a data type determined
by Type Promotion Rules.
"""
return _cur_framework(x1, x2).bitwise_and(x1, x2)
|
662a706ba79e5f67958c78752197eff4c2a0a298
| 27,336 |
def add_without_punctuation(line, punctuation):
"""Returns the line cleaned of punctuation.
Param:
line (unicode)
Returns:
The corrected line, or False if the line contains no punctuation
"""
cleaned_line = line.translate(str.maketrans('', '', punctuation))
if line != cleaned_line:
return cleaned_line
else:
return False
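# Illustrative usage (not part of the original snippet), using the standard
# library's punctuation set for the second argument.
import string
print(add_without_punctuation("Hello, world!", string.punctuation))  # Hello world
print(add_without_punctuation("no punctuation here", string.punctuation))  # False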
|
20dafde21efad966f8ea1be0da928594e2ee5cc4
| 27,338 |
from typing import Callable
def there_is_zero(
f: Callable[[float], float], head: float, tail: float, subint: int
) -> bool:
"""
Checks if the function has a zero in [head, tail], looking at subint
subintervals
"""
length = tail - head
step = length / subint
t = head
a = f(head)
for i in range(1, subint + 1):
t += step
if a * f(t) <= 0:
return True
return False
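# Illustrative usage (not part of the original snippet): x**2 - 2 changes sign
# on [0, 2], while x**2 + 1 never does.
print(there_is_zero(lambda x: x ** 2 - 2, 0.0, 2.0, 10))  # True
print(there_is_zero(lambda x: x ** 2 + 1, 0.0, 2.0, 10))  # False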
|
dd80c55d4be5fed2e3100672ea63862014b0f8cc
| 27,339 |
def target_distribution_gen(name, parameter1, parameter2):
""" parameter1 is usually a parameter of distribution (not always relevant). parameter2 is usually noise."""
if name=="Fritz-visibility":
""" parameter2 is the visibility"""
ids = np.zeros((4,4,4)).astype(str)
p = np.zeros((4,4,4))
for i,j,k,l,m,n in product('01',repeat=6):
a = int(i+j,2)
b = int(k+l,2)
c = int(m+n,2)
temp0 = [str(a),str(b),str(c)]
temp = [a,b,c]
ids[temp[0],temp[1],temp[2]] = ''.join(temp0)
cspi8 = 1/(2*np.sqrt(2))
cos2pi8 = (2 + np.sqrt(2))/4
sin2pi8 = (2 - np.sqrt(2))/4
if m==j and n==l:
if n=='0':
if i==k:
p[temp[0],temp[1],temp[2]] = (1 - parameter2*(cos2pi8 - sin2pi8))/16
if i!=k:
p[temp[0],temp[1],temp[2]] = (1 + parameter2*(cos2pi8 - sin2pi8))/16
if n=='1':
if m=='0':
if i==k:
p[temp[0],temp[1],temp[2]] = 1/16 - cspi8 * parameter2/8
if i!=k:
p[temp[0],temp[1],temp[2]] = 1/16 + cspi8 * parameter2/8
if m=='1':
if i==k:
p[temp[0],temp[1],temp[2]] = 1/16 + cspi8 * parameter2/8
if i!=k:
p[temp[0],temp[1],temp[2]] = 1/16 - cspi8 * parameter2/8
p = p.flatten()
ids = ids.flatten()
if name=="Renou-visibility":
""" Info: If param_c >~ 0.886 or <~0.464, there is no classical 3-local model."""
""" In terms of c**2: above 0.785 or below 0.215 no classical 3-local model."""
c = parameter1
v = parameter2
p = np.array([
-(-1 + v)**3/64.,-((-1 + v)*(1 + v)**2)/64.,((-1 + v)**2*(1 + v))/64.,((-1 + v)**2*(1 + v))/64.,-((-1 + v)*(1 + v)**2)/64.,-((-1 + v)*(1 + v)**2)/64.,((1 + v)*(1 + (-2 + 4*c**2)*v + v**2))/64.,
((1 + v)*(1 + (2 - 4*c**2)*v + v**2))/64.,((-1 + v)**2*(1 + v))/64.,((1 + v)*(1 + (2 - 4*c**2)*v + v**2))/64.,(1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,
(1 + v - 4*c**4*v + (-1 + 4*c**4)*v**2 - v**3)/64.,((-1 + v)**2*(1 + v))/64.,((1 + v)*(1 + (-2 + 4*c**2)*v + v**2))/64.,(1 + (-3 + 8*c**2 - 4*c**4)*v + (3 - 8*c**2 + 4*c**4)*v**2 - v**3)/64.,
(1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,-((-1 + v)*(1 + v)**2)/64.,-((-1 + v)*(1 + v)**2)/64.,((1 + v)*(1 + (2 - 4*c**2)*v + v**2))/64.,((1 + v)*(1 + (-2 + 4*c**2)*v + v**2))/64.,
-((-1 + v)*(1 + v)**2)/64.,-(-1 + v)**3/64.,((-1 + v)**2*(1 + v))/64.,((-1 + v)**2*(1 + v))/64.,((1 + v)*(1 + (-2 + 4*c**2)*v + v**2))/64.,((-1 + v)**2*(1 + v))/64.,
(1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,(1 + (-3 + 8*c**2 - 4*c**4)*v + (3 - 8*c**2 + 4*c**4)*v**2 - v**3)/64.,((1 + v)*(1 + (2 - 4*c**2)*v + v**2))/64.,((-1 + v)**2*(1 + v))/64.,
(1 + v - 4*c**4*v + (-1 + 4*c**4)*v**2 - v**3)/64.,(1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,((-1 + v)**2*(1 + v))/64.,((1 + v)*(1 + (-2 + 4*c**2)*v + v**2))/64.,
(1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,(1 + (-3 + 8*c**2 - 4*c**4)*v + (3 - 8*c**2 + 4*c**4)*v**2 - v**3)/64.,((1 + v)*(1 + (2 - 4*c**2)*v + v**2))/64.,((-1 + v)**2*(1 + v))/64.,
(1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,(1 + v - 4*c**4*v + (-1 + 4*c**4)*v**2 - v**3)/64.,(1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,
(1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,(1 + 3*(1 - 2*c**2)**2*v + 3*(1 - 2*c**2)**2*v**2 + (1 + 16*c**3*np.sqrt(1 - c**2) - 16*c**5*np.sqrt(1 - c**2))*v**3)/64.,
(1 - (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 + (1 - 16*c**3*np.sqrt(1 - c**2) + 16*c**5*np.sqrt(1 - c**2))*v**3)/64.,(1 + v - 4*c**4*v + (-1 + 4*c**4)*v**2 - v**3)/64.,
(1 + (-3 + 8*c**2 - 4*c**4)*v + (3 - 8*c**2 + 4*c**4)*v**2 - v**3)/64.,(1 - (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 + (1 - 16*c**3*np.sqrt(1 - c**2) + 16*c**5*np.sqrt(1 - c**2))*v**3)/64.,
(1 - (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 + (1 + 16*c**3*np.sqrt(1 - c**2) - 16*c**5*np.sqrt(1 - c**2))*v**3)/64.,((-1 + v)**2*(1 + v))/64.,((1 + v)*(1 + (2 - 4*c**2)*v + v**2))/64.,
(1 + v - 4*c**4*v + (-1 + 4*c**4)*v**2 - v**3)/64.,(1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,((1 + v)*(1 + (-2 + 4*c**2)*v + v**2))/64.,((-1 + v)**2*(1 + v))/64.,
(1 + (-3 + 8*c**2 - 4*c**4)*v + (3 - 8*c**2 + 4*c**4)*v**2 - v**3)/64.,(1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,(1 + (-3 + 8*c**2 - 4*c**4)*v + (3 - 8*c**2 + 4*c**4)*v**2 - v**3)/64.,
(1 + v - 4*c**4*v + (-1 + 4*c**4)*v**2 - v**3)/64.,(1 - (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 + (1 - 16*c**3*np.sqrt(1 - c**2) + 16*c**5*np.sqrt(1 - c**2))*v**3)/64.,
(1 - (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 + (1 + 16*c**3*np.sqrt(1 - c**2) - 16*c**5*np.sqrt(1 - c**2))*v**3)/64.,(1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,
(1 + (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 - v**3)/64.,(1 - (1 - 2*c**2)**2*v - (1 - 2*c**2)**2*v**2 + (1 + 16*c**3*np.sqrt(1 - c**2) - 16*c**5*np.sqrt(1 - c**2))*v**3)/64.,
(1 + 3*(1 - 2*c**2)**2*v + 3*(1 - 2*c**2)**2*v**2 + (1 - 16*c**3*np.sqrt(1 - c**2) + 16*c**5*np.sqrt(1 - c**2))*v**3)/64.
])
ids = np.array([
"000", "001", "002", "003", "010", "011", "012", "013", "020", "021", \
"022", "023", "030", "031", "032", "033", "100", "101", "102", "103", \
"110", "111", "112", "113", "120", "121", "122", "123", "130", "131", \
"132", "133", "200", "201", "202", "203", "210", "211", "212", "213", \
"220", "221", "222", "223", "230", "231", "232", "233", "300", "301", \
"302", "303", "310", "311", "312", "313", "320", "321", "322", "323", \
"330", "331", "332", "333"
])
if name=="Renou-localnoise":
""" Info: If param_c >~ 0.886 or <~0.464, there is no classical 3-local model."""
""" In terms of c**2: above 0.785 or below 0.215 no classical 3-local model."""
param_c = parameter1
param_s = np.np.sqrt(1-param_c**2)
# the si and ci functions
param2_c = {'2':param_c, '3':param_s}
param2_s = {'2':param_s, '3':-1*param_c}
# First create noiseless Salman distribution.
ids = np.zeros((4,4,4)).astype(str)
p = np.zeros((4,4,4))
for a,b,c in product('0123',repeat=3):
temp0 = [a,b,c]
temp = [int(item) for item in temp0]
ids[temp[0],temp[1],temp[2]] = ''.join(temp0)
# p(12vi) et al.
if (a=='0' and b=='1' and c=='2') or (a=='1' and b=='0' and c=='3'):
p[temp[0],temp[1],temp[2]] = 1/8*param_c**2
elif (c=='0' and a=='1' and b=='2') or (c=='1' and a=='0' and b=='3'):
p[temp[0],temp[1],temp[2]] = 1/8*param_c**2
elif (b=='0' and c=='1' and a=='2') or (b=='1' and c=='0' and a=='3'):
p[temp[0],temp[1],temp[2]] = 1/8*param_c**2
elif (a=='0' and b=='1' and c=='3') or (a=='1' and b=='0' and c=='2'):
p[temp[0],temp[1],temp[2]] = 1/8*param_s**2
elif (c=='0' and a=='1' and b=='3') or (c=='1' and a=='0' and b=='2'):
p[temp[0],temp[1],temp[2]] = 1/8*param_s**2
elif (b=='0' and c=='1' and a=='3') or (b=='1' and c=='0' and a=='2'):
p[temp[0],temp[1],temp[2]] = 1/8*param_s**2
# p(vi vj vk) et al.
elif a in '23' and b in '23' and c in '23':
p[temp[0],temp[1],temp[2]] = 1/8 * (param2_c[a]*param2_c[b]*param2_c[c] + param2_s[a]*param2_s[b]*param2_s[c])**2
else:
p[temp[0],temp[1],temp[2]] = 0
# Let's add local noise.
new_values = np.zeros_like(p)
for a,b,c in product('0123',repeat=3):
temp0 = [a,b,c]
temp = [int(item) for item in temp0]
new_values[temp[0],temp[1],temp[2]] = (
parameter2**3 * p[temp[0],temp[1],temp[2]] +
parameter2**2*(1-parameter2) * 1/4 * ( np.sum(p,axis=2)[temp[0],temp[1]] + np.sum(p,axis=0)[temp[1],temp[2]] + np.sum(p,axis=1)[temp[0],temp[2]] ) +
parameter2*(1-parameter2)**2 * 1/16 * ( np.sum(p,axis=(1,2))[temp[0]] + np.sum(p,axis=(0,2))[temp[1]] + np.sum(p,axis=(0,1))[temp[2]] ) +
(1-parameter2)**3 * 1/64
)
p = new_values.flatten()
ids = ids.flatten()
if name=="elegant-visibility":
""" Recreating the elegant distribution with visibility v (parameter2) in each singlet. """
ids = np.zeros((4,4,4)).astype(str)
for a,b,c in product('0123',repeat=3):
temp0 = [a,b,c]
temp = [int(item) for item in temp0]
ids[temp[0],temp[1],temp[2]] = ''.join(temp0)
ids = ids.flatten()
p = np.array([1/256 *(4+9 *parameter2+9 *parameter2**2+3 *parameter2**3),1/256 *(4+parameter2-3 *parameter2**2-parameter2**3),1/256 *(4+parameter2-3 *parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+9*parameter2+9*parameter2**2+3*parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+9*parameter2+9*parameter2**2+3*parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4-3*parameter2+3*parameter2**2+parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+parameter2-3*parameter2**2-parameter2**3),1/256 *(4+9*parameter2+9*parameter2**2+3*parameter2**3)])
if name=="elegant-localnoise":
""" Recreating the elegant distribution, with each detector having 1-v (1-parameter2) chance of outputting a uniformly random output, and v chance of working properly. """
ids = np.zeros((4,4,4)).astype(str)
p = np.zeros((4,4,4))
for a,b,c in product('0123',repeat=3):
temp0 = [a,b,c]
temp = [int(item) for item in temp0]
ids[temp[0],temp[1],temp[2]] = ''.join(temp0)
if (a==b) and (b==c):
p[temp[0],temp[1],temp[2]] = 25/256
elif (a==b and b!=c) or (b==c and c!=a) or (c==a and a!=b):
p[temp[0],temp[1],temp[2]] = 1/256
else:
p[temp[0],temp[1],temp[2]] = 5/256
# Let's add local noise.
new_values = np.zeros_like(p)
for a,b,c in product('0123',repeat=3):
temp0 = [a,b,c]
temp = [int(item) for item in temp0]
new_values[temp[0],temp[1],temp[2]] = (
parameter2**3 * p[temp[0],temp[1],temp[2]] +
parameter2**2*(1-parameter2) * 1/4 * ( np.sum(p,axis=2)[temp[0],temp[1]] + np.sum(p,axis=0)[temp[1],temp[2]] + np.sum(p,axis=1)[temp[0],temp[2]] ) +
parameter2*(1-parameter2)**2 * 1/16 * ( np.sum(p,axis=(1,2))[temp[0]] + np.sum(p,axis=(0,2))[temp[1]] + np.sum(p,axis=(0,1))[temp[2]] ) +
(1-parameter2)**3 * 1/64
)
p=new_values.flatten()
ids = ids.flatten()
assert (np.abs(np.sum(p)-1.0) < (1E-6)),"Improperly normalized p!"
return p
|
475ee931c64f1e0bb0f6a6494aeacb25283adb9f
| 27,340 |
def module_code(
sexpr,
name: str = "<unknown>",
filename: str = "<unknown>",
lineno: int = 1,
doc: str = "",
):
"""Create a module's code object from given metadata and s-expression.
"""
module_builder = Builder(
ScopeSolver.outermost(), [], SharedState(doc, lineno, filename)
)
# build the instruction stream; symbol references remain unresolved at this point
scheduling(module_builder.eval(sexpr))
# resolve the collected symbols
module_builder.sc.resolve()
# finish building and emit the instruction list
instructions = module_builder.build()
code = make_code_obj(name, filename, lineno, doc, [], [], [], instructions)
return code
|
b1baf9ab8355fadd15c6763c569399aca62b9994
| 27,341 |
def acknowledgements():
"""Provides acknowlegements for the JRO instruments and experiments
Returns
-------
ackn : str
String providing acknowledgement text for studies using JRO data
"""
ackn = ' '.join(["The Jicamarca Radio Observatory is a facility of the",
"Instituto Geofisico del Peru operated with support from",
"the NSF AGS-1433968 through Cornell University."])
return ackn
|
013727319d43baaec57461995af8a683b5f02278
| 27,342 |
def list_vmachines(vdc):
"""
Returns:
list: vmachines info
"""
return vdc.to_dict()["vmachines"]
|
b3ce74c5b6f7d6d9f109a884f0c050ffae840e70
| 27,343 |
import numpy as np
def reflection_matrix(v):
"""
The reflection transformation about a plane with normal vector `v`.
"""
n = len(v)
v = np.array(v)[np.newaxis]
return np.eye(n) - 2 * np.dot(v.T, v)
|
0b56f21e95162720e4856ac2ee995570c0231d4f
| 27,344 |
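# Minimal usage sketch for reflection_matrix above (assumes numpy and that v is a
# unit normal): reflecting across the z = 0 plane flips the z component.
import numpy as np

R = reflection_matrix([0.0, 0.0, 1.0])
assert np.allclose(R, np.diag([1.0, 1.0, -1.0]))
assert np.allclose(R @ np.array([1.0, 2.0, 3.0]), [1.0, 2.0, -3.0])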
def is_valid_furl_or_file(furl_or_file):
"""Validate a FURL or a FURL file.
If ``furl_or_file`` looks like a file, we simply make sure its directory
exists and that it has a ``.furl`` file extension. We don't try to see
if the FURL file exists or to read its contents. This is useful for
cases where auto re-connection is being used.
"""
if is_valid_furl(furl_or_file) or is_valid_furl_file(furl_or_file):
return True
else:
return False
|
452617b469006ec5bf4d341af2163dc3b4f69bf7
| 27,346 |
import random
import numpy as np
def stride(input_data, input_labels):
"""
Takes an input waterfall visibility with labels and strides across frequency,
producing (Nchan - 64)/S new waterfalls to be folded.
"""
spw_hw = 32 # spectral window half width
nchans = 1024
fold = nchans // (2 * spw_hw)  # integer count of non-overlapping spectral windows
sample_spws = random.sample(range(0, 60), fold)
x = np.array(
[
input_data[:, i - spw_hw : i + spw_hw]
for i in range(spw_hw, 1024 - spw_hw, (nchans - 2 * spw_hw) // 60)
]
)
x_labels = np.array(
[
input_labels[:, i - spw_hw : i + spw_hw]
for i in range(spw_hw, 1024 - spw_hw, (nchans - 2 * spw_hw) // 60)
]
)
X = np.array([x[i].T for i in sample_spws])
X_labels = np.array([x_labels[i].T for i in sample_spws])
X_ = X.reshape(-1, 60).T
X_labels = X_labels.reshape(-1, 60).T
return X_, X_labels
|
f490df82e65356443d6fe8a951a0ad282ca1c2af
| 27,347 |
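# Minimal usage sketch for stride above (assumes numpy; the shapes mirror the
# hard-coded 60-time-sample, 1024-channel waterfall the function expects).
import numpy as np

waterfall = np.random.rand(60, 1024)           # (Ntimes, Nchans) visibilities
flags = np.random.randint(0, 2, (60, 1024))    # per-pixel RFI labels
X, X_labels = stride(waterfall, flags)
print(X.shape, X_labels.shape)                 # both (60, 1024): 16 folded 64-channel windows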
import DominantSparseEigenAD.Lanczos as lanczos
import torch
def chiF_sparseAD(model, k):
"""
Compute chi_F using the DominantSparseSymeig primitive, where the matrix
to be diagonalized is "sparse" and represented as a function.
"""
lanczos.setDominantSparseSymeig(model.H, model.Hadjoint_to_gadjoint)
dominant_sparse_symeig = lanczos.DominantSparseSymeig.apply
E0, psi0 = dominant_sparse_symeig(model.g, k, model.dim)
logF = torch.log(psi0.detach().matmul(psi0))
dlogF, = torch.autograd.grad(logF, model.g, create_graph=True)
d2logF, = torch.autograd.grad(dlogF, model.g)
chiF = -d2logF.item()
return E0, psi0, chiF
|
acb820da75218dd37d805867ed246ca9ec2efad2
| 27,349 |
def sum_digits(y):
"""Sum all the digits of y.
>>> sum_digits(10) # 1 + 0 = 1
1
>>> sum_digits(4224) # 4 + 2 + 2 + 4 = 12
12
>>> sum_digits(1234567890)
45
>>> a = sum_digits(123) # make sure that you are using return rather than print
>>> a
6
"""
"*** YOUR CODE HERE ***"
sum = 0
while(y > 0):
sum += y % 10
y = y // 10
return sum
|
5300e5bdbb058c4cc8d4a57155b114eab31b1935
| 27,350 |
import scipy
def leastsq(error_func, x0, *args, **options):
"""Find the parameters that yield the best fit for the data.
`x0` can be a sequence, array, Series, or Params
Positional arguments are passed along to `error_func`.
Keyword arguments are passed to `scipy.optimize.leastsq`
error_func: function that computes a sequence of errors
x0: initial guess for the best parameters
args: passed to error_func
options: passed to leastsq
:returns: Params object with best_params and ModSimSeries with details
"""
# override `full_output` so we get a message if something goes wrong
options["full_output"] = True
# run leastsq
t = scipy.optimize.leastsq(error_func, x0=x0, args=args, **options)
best_params, cov_x, infodict, mesg, ier = t
# pack the results into a ModSimSeries object
details = ModSimSeries(infodict)
details.set(cov_x=cov_x, mesg=mesg, ier=ier)
# if we got a Params object, we should return a Params object
if isinstance(x0, Params):
best_params = Params(Series(best_params, x0.index))
# return the best parameters and details
return best_params, details
|
d949358016ddab5d650ca1bab5c98e4ae124c153
| 27,351 |
def atomic_token_partition(value):
"""Partition given value on a token that appears resolvable(contains no
sub tokens). Returns in a tuple: (before_token, token, after_token).
Returned token includes token syntax. If no tokens are found, returned
tuple contains None in all values.
:param value: text to find a token from, and partition
:type value: str
:return: before_token, token, after_token
:rtype: tuple(str, str, str)
"""
before, sep, after_bef = value.rpartition(TOKEN_PREFIX)
if not sep:
return (None, None, None)
token, sep, after = after_bef.partition(TOKEN_SUFFIX)
if not sep:
# msg = 'bad resolve formatting, cannot find closer for {}'
# msg = msg.format(before + tokens.TOKEN_PREFIX)
# logger.error(msg)
return (None, None, None)
return before, make_token_str(token), after
|
1a4b0b952dcaa65c6f04d066685a609f1bd03669
| 27,352 |
import numpy as np
def add_color_bar(img, space, cv):
"""
args:
img: (ndarray) in [img_rows, img_cols, channels], dtype as unit8
space: (int) pixels of space
cv: (int) color value in [0, 255]
return:
tmp_img: (ndarray) processed img
"""
assert len(img.shape) == 3, "img should be 3D"
img_rows, img_cols, channels = img.shape
tmp_img = np.ones((img_rows + 2 * space,
img_cols + 2 * space,
channels), np.uint8) * cv
tmp_img[space: space + img_rows,
space: space + img_cols] = img
return tmp_img
|
a08fc1eac525dd4156add949bc4cf706ee1ea299
| 27,353 |
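# Minimal usage sketch for add_color_bar above (assumes numpy): pad a 2x3 RGB
# image with a 2-pixel white border on every side.
import numpy as np

img = np.zeros((2, 3, 3), dtype=np.uint8)
framed = add_color_bar(img, space=2, cv=255)
assert framed.shape == (6, 7, 3)
assert framed[0, 0, 0] == 255 and framed[2, 2, 0] == 0  # border is white, interior kept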
import json
def get_cli_body_ssh(command, response, module):
"""Get response for when transport=cli. This is kind of a hack and mainly
needed because these modules were originally written for NX-API. And
not every command supports "| json" when using cli/ssh. As such, we assume
if | json returns an XML string, it is a valid command, but that the
resource doesn't exist yet. Instead, the output will be a raw string
when issuing commands containing 'show run'.
"""
if 'xml' in response[0] or response[0] == '\n':
body = []
elif 'show run' in command:
body = response
else:
try:
body = [json.loads(response[0])]
except ValueError:
module.fail_json(msg='Command does not support JSON output',
command=command)
return body
|
7d65bd1d19a837b5e78d0e1d834f2a00f4815cdb
| 27,354 |
def zero_at(pos, size=8):
"""
Create a size-bit int which only has one '0' bit at specific position.
:param int pos: Position of '0' bit.
:param int size: Length of value by bit.
:rtype: int
"""
assert 0 <= pos < size
return 2**size - 2**(size - pos - 1) - 1
|
7ebdcc1ac9db4ad934108f67a751b336b4f18011
| 27,355 |
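# Worked examples for zero_at above: the single cleared bit moves from the
# most-significant position (pos=0) toward the least-significant one.
assert zero_at(0) == 0b01111111 == 127
assert zero_at(3) == 0b11101111 == 239
assert zero_at(15, size=16) == 0b1111111111111110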
def get_authz_token(request, user=None, access_token=None):
"""Construct AuthzToken instance from session; refresh token if needed."""
if access_token is not None:
return _create_authz_token(request, user=user, access_token=access_token)
elif is_request_access_token(request):
return _create_authz_token(request, user=user)
elif is_session_access_token(request) and not is_session_access_token_expired(request, user=user):
return _create_authz_token(request, user=user, access_token=access_token)
elif not is_refresh_token_expired(request):
# Have backend reauthenticate the user with the refresh token
user = authenticate(request)
if user:
return _create_authz_token(request, user=user)
return None
|
84a7f06085877b51181cadaf29da4dc0611c636b
| 27,356 |
import torch
def get_bert_model():
"""
Load uncased HuggingFace model.
"""
bert_model = torch.hub.load('huggingface/pytorch-transformers',
'model',
'bert-base-uncased')
return bert_model
|
51b49255fe4b1291d538251c8c199bd570fb1a31
| 27,357 |
import bio_utils.bio as bio
import tqdm
def get_bitseq_estimates(
config,
isoform_strategy,
bitseq_id_field='transcript_id',
strings_to_remove=['.cds-only', '.merged']):
""" Load the bitseq abundance estimates into a single long data frame.
Parameters
----------
config: dict-like
The configuration for the project, presumably from the yaml file
isoform_strategy: str
The strategy for handling transcript isoforms
bitseq_id_field: str
Name for the "transcript_id" field (second column) in bitseq tr file
strings_to_remove: list of strings
A list of strings to replace with "" in the bitseq ids
Returns
-------
bitseq_estimates: pd.DataFrame
A data frame containing the following columns
* rpkm_{mean,var}: the bitseq estimates
* sample: the name of the respective sample
* type: "ribo" or "rna"
"""
msg = "Reading the bitseq tr info file"
logger.info(msg)
# check which transcript file to load
is_merged = False
if isoform_strategy == "merged":
is_merged = True
# and get the file
transcript_fasta = filenames.get_transcript_fasta(
config['genome_base_path'],
config['genome_name'],
is_annotated=True,
is_merged=is_merged,
is_cds_only=True
)
tr_info = filenames.get_bitseq_transcript_info(transcript_fasta)
bitseq_tr = bio.read_bitseq_tr_file(tr_info)
# we need to remove all of the indicated strings from the ids
for to_remove in strings_to_remove:
tids = bitseq_tr['transcript_id'].str.replace(to_remove, "")
bitseq_tr['transcript_id'] = tids
bitseq_tr = bitseq_tr.rename(columns={'transcript_id': bitseq_id_field})
note = config.get('note', None)
all_dfs = []
msg = "Reading riboseq BitSeq estimates"
logger.info(msg)
is_unique = 'keep_riboseq_multimappers' not in config
it = tqdm.tqdm(config['riboseq_samples'].items())
for name, file in it:
lengths, offsets = get_periodic_lengths_and_offsets(
config,
name,
isoform_strategy=isoform_strategy,
is_unique=is_unique
)
bitseq_rpkm_mean = filenames.get_riboseq_bitseq_rpkm_mean(
config['riboseq_data'],
name,
is_unique=is_unique,
is_transcriptome=True,
is_cds_only=True,
length=lengths,
offset=offsets,
isoform_strategy=isoform_strategy,
note=note
)
field_names = ['rpkm_mean', 'rpkm_var']
bitseq_rpkm_mean_df = bio.read_bitseq_means(
bitseq_rpkm_mean,
names=field_names
)
bitseq_rpkm_mean_df['sample'] = name
bitseq_rpkm_mean_df['type'] = 'ribo'
bitseq_rpkm_mean_df[bitseq_id_field] = bitseq_tr[bitseq_id_field]
all_dfs.append(bitseq_rpkm_mean_df)
# now, the rnaseq
msg = "Reading RNA-seq BitSeq estimates"
logger.info(msg)
is_unique = ('remove_rnaseq_multimappers' in config)
it = tqdm.tqdm(config['rnaseq_samples'].items())
for name, data in it:
bitseq_rpkm_mean = filenames.get_rnaseq_bitseq_rpkm_mean(
config['rnaseq_data'],
name,
is_unique=is_unique,
is_transcriptome=True,
is_cds_only=True,
isoform_strategy=isoform_strategy,
note=note
)
field_names = ['rpkm_mean', 'rpkm_var']
bitseq_rpkm_mean_df = bio.read_bitseq_means(
bitseq_rpkm_mean,
names=field_names
)
bitseq_rpkm_mean_df['sample'] = name
bitseq_rpkm_mean_df['type'] = 'rna'
bitseq_rpkm_mean_df[bitseq_id_field] = bitseq_tr[bitseq_id_field]
all_dfs.append(bitseq_rpkm_mean_df)
msg = "Joining estimates into long data frame"
logger.info(msg)
long_df = pd.concat(all_dfs)
long_df = long_df.reset_index(drop=True)
return long_df
|
93738faf48d82c6989411c3034271da1224490b0
| 27,358 |
from pathlib import Path
def na_layout(na_layout_path: Path) -> ParadigmLayout:
"""
Returns the parsed NA layout.
"""
with na_layout_path.open(encoding="UTF-8") as layout_file:
return ParadigmLayout.load(layout_file)
|
35e8405b5e17ab4da917d48ef89779c7d081791e
| 27,359 |
def load_atomic_data_for_training(in_file, categories, tokenizer, max_input_length, max_output_length):
"""
Loads an ATOMIC dataset file and
:param in_file: CSV ATOMIC file
:param categories: ATOMIC category list
:param tokenizer: LM tokenizer
:param max_input_length: maximum number of input tokens to keep per example
:param max_output_length: maximum number of output tokens to keep per example
:return: a dict with the padded token-id "examples" and their "input_lengths"
"""
examples = load_atomic_data(in_file, categories)
examples = [(f"{e1} <{cat}>", f"{e2} <eos>")
for e1, e1_relations in examples.items()
for cat, e2s in e1_relations.items()
for e2 in e2s]
process = lambda s: tokenizer.convert_tokens_to_ids(tokenizer.tokenize(s))
examples = [tuple(map(process, ex)) for ex in examples]
# Pad
max_input_length = min(max_input_length, max([len(ex[0]) for ex in examples]))
max_output_length = min(max_output_length, max([len(ex[1]) for ex in examples]))
max_length = max_input_length + max_output_length + 1
input_lengths = [len(ex[0]) for ex in examples]
examples = [ex[0] + ex[1] for ex in examples]
examples = [ex[:max_length] + [0] * max(0, max_length - len(ex)) for ex in examples]
examples = {"examples": examples, "input_lengths": input_lengths}
return examples
|
e47916d61dae92bdbe3cc8ccf8a06299878f1814
| 27,361 |
def md5s_loaded(func):
"""Decorator which automatically calls load_md5s."""
def newfunc(self, *args, **kwargs):
if self.md5_map is None:
self.load_md5s()
return func(self, *args, **kwargs)
return newfunc
|
9eba943b939c484280b6dca79cf79fc04337f0ab
| 27,362 |
def is_dap_message(message: str) -> bool:
"""Checks if a message contains information about some neighbour DAP."""
if "DAP" in message:
return True
return False
|
01294888ab5cac7560fb7c58669f14573e0c1acd
| 27,363 |
import cv2
import numpy as np
from numpy import random
def PhotoMetricDistortion(
img,
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18):
"""Apply photometric distortion to image sequentially, every dictprocessation
is applied with a probability of 0.5. The position of random contrast is in
second or second to last.
1. random brightness
2. random contrast (mode 0)
3. convert color from BGR to HSV
4. random saturation
5. random hue
6. convert color from HSV to BGR
7. random contrast (mode 1)
Args:
img (np.ndarray): input image.
brightness_delta (int): delta of brightness.
contrast_range (tuple): range of contrast.
saturation_range (tuple): range of saturation.
hue_delta (int): delta of hue.
Returns:
dict: distorted_image
"""
contrast_lower, contrast_upper = contrast_range
saturation_lower, saturation_upper = saturation_range
def bgr2hsv(img):
return cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
def hsv2bgr(img):
return cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
def convert(img, alpha=1, beta=0):
"""Multiple with alpha and add beat with clip."""
img = img.astype(np.float32) * alpha + beta
img = np.clip(img, 0, 255)
return img.astype(np.uint8)
def brightness(img):
"""Brightness distortion."""
if random.randint(2):
return convert(
img,
beta=random.uniform(-brightness_delta,
brightness_delta))
return img
def contrast(img):
"""Contrast distortion."""
if random.randint(2):
return convert(
img,
alpha=random.uniform(contrast_lower, contrast_upper))
return img
def saturation(img):
"""Saturation distortion."""
if random.randint(2):
img = bgr2hsv(img)
img[:, :, 1] = convert(
img[:, :, 1],
alpha=random.uniform(saturation_lower,
saturation_upper))
img = hsv2bgr(img)
return img
def hue(img):
"""Hue distortion."""
if random.randint(2):
img = bgr2hsv(img)
img[:, :,
0] = (img[:, :, 0].astype(int) +
random.randint(-hue_delta, hue_delta)) % 180
img = hsv2bgr(img)
return img
def distorted(img):
"""Call function to perform photometric distortion on images.
Args:
img (np.ndarray): input image.
Returns:
dict: Result dict with images distorted.
"""
# random brightness
img = brightness(img)
# mode == 0 --> do random contrast first
# mode == 1 --> do random contrast last
mode = random.randint(2)
if mode == 1:
img = contrast(img)
# random saturation
img = saturation(img)
# random hue
img = hue(img)
# random contrast
if mode == 0:
img = contrast(img)
return img
return distorted(img)
|
04b371ec269d1ea52e7689726fe63b158bb53e11
| 27,364 |
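# Minimal usage sketch for PhotoMetricDistortion above (assumes cv2 and numpy are
# installed and the input is a uint8 BGR image; the output is random per call).
import numpy as np

bgr = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
out = PhotoMetricDistortion(bgr, brightness_delta=16, contrast_range=(0.8, 1.2))
assert out.shape == bgr.shape and out.dtype == np.uint8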
def make_game():
"""Builds and returns an Apprehend game."""
return ascii_art.ascii_art_to_game(
GAME_ART, what_lies_beneath=' ',
sprites={'P': PlayerSprite, 'b': BallSprite},
update_schedule=['b', 'P'], nb_action=2)
|
9e6b1ead4ec65083ffe00c5e8708bd174c7eaf79
| 27,366 |
from typing import Any
from typing import Tuple
def process_makefile(
data: Any,
specification: Any,
path: Tuple[str, ...] = (),
apply_defaults: bool = True,
) -> Any:
"""Validates a makefile and applies defaults to missing keys.
Note that that default values are deep-copied before being set.
"""
if isinstance(specification, WithoutDefaults):
specification = specification.specification
data = process_makefile(data, specification, path, apply_defaults=False)
elif isinstance(specification, PreProcessMakefile):
data, specification = specification(path, data)
data = process_makefile(data, specification, path, apply_defaults)
elif _is_spec(specification):
_instantiate_spec(specification)(path, data)
elif isinstance(data, (dict, type(None))) and isinstance(specification, dict):
# A limitation of YAML is that empty subtrees are equal to None;
# this check ensures that empty subtrees to be handled properly
if data is None:
data = {}
_process_default_values(data, specification, path, apply_defaults)
for cur_key in data:
ref_key = _get_matching_spec_or_value(
cur_key, specification, path + (cur_key,)
)
data[cur_key] = process_makefile(
data[cur_key], specification[ref_key], path + (cur_key,), apply_defaults
)
elif isinstance(data, (list, type(None))) and isinstance(specification, list):
if not all(map(_is_spec, specification)):
raise TypeError(
"Lists contains non-specification objects (%r): %r"
% (_path_to_str(path), specification)
)
elif data is None: # See comment above
data = []
specification = IsListOf(*specification)
_instantiate_spec(specification)(path, data)
elif not isinstance(specification, (dict, list)):
raise TypeError(
"Unexpected type in makefile specification at %r: %r!"
% (_path_to_str(path), specification)
)
else:
raise MakefileError(
"Inconsistency between makefile specification and "
"current makefile at %s:\n Expected %s, "
"found %s %r!"
% (
_path_to_str(path),
type(specification).__name__,
type(data).__name__,
data,
)
)
return data
|
6d4d114b59c22c36414a3c4e0f232dc7782802f1
| 27,368 |
from sklearn.datasets import fetch_openml
def load_fashion(n_data=70000):
"""Fetches the fashion MNIST dataset and returns its desired subset.
Args:
n_data (int, optional): The size of the wanted subset. Defaults to 70000.
Returns:
tuple: The dataset, the labels of elements, the names of categories and the name of the dataset.
"""
mnist = fetch_openml("Fashion-MNIST")
data, labels = __split_data(mnist["data"], mnist["target"].astype(int), n_data)
label_names = [
"T-shirt",
"Trouser",
"Pullover",
"Dress",
"Coat",
"Sandal",
"Shirt",
"Sneaker",
"Bag",
"Ankle boot"
]
return data, labels, label_names, "Fashion MNIST"
|
ed09938ad8c776f11029f6eaecb4f5a01cbf2fed
| 27,369 |
def add_countdown_ago(date, event, events_dates_per_event):
"""
Given a date and an event, compute the number of days since the most recent
previous occurrence of that event within events_dates_per_event.
"""
countdown = []
for special_date in events_dates_per_event[event]:
date_count_down = (special_date - date).days
if date_count_down <= 0:
countdown.append(date_count_down)
return -1 * max(countdown)
|
3ced21ad6b53007e8777d245dee4fb84f83a3086
| 27,371 |
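# Worked example for add_countdown_ago above (the event name and dates are made up
# for illustration): 2021-01-15 is 14 days after the nearest prior "promo" date.
from datetime import date

events = {"promo": [date(2021, 1, 1), date(2021, 2, 1)]}
assert add_countdown_ago(date(2021, 1, 15), "promo", events) == 14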
def _get_indexed_role(dep):
"""Return the function (governor/dependent) and role
based on the dependency relation type."""
gram_relation = dep['@type'][0:5]
if gram_relation in ["conj", "conj_"]:
return (-1, 'conj')
(function, role) = _relation_map[gram_relation]
return (_iminus_one(dep[function]['@idx']), role)
|
46edb717174f4267414604c362c22134265ceca4
| 27,373 |
def aug_op_mul_col(aug_input: Tensor, mul: float) -> Tensor:
"""
multiply each pixel
:param aug_input: the tensor to augment
:param mul: the multiplication factor
:return: the augmented tensor
"""
input_tensor = aug_input * mul
input_tensor = aug_op_clip(input_tensor, clip=(0, 1))
return input_tensor
|
42e7d1596a7417ca7d981573ffaac9dccff49490
| 27,374 |
import math
def example3(path: str):
"""Planetary orbit"""
print(f"\n{Col.TITL}{' Example 3 ':-^79}{Col.RES}\n")
if not path:
path = "qr_data/ex3.txt"
file = open(path, "r")
ls = file.readlines()
# Read input data from `file`
m = int(ls[0].replace("\n", ""))
data = np.zeros((m, 2))
for i in range(1, m + 1):
data[i - 1, :] = list(map(float, ls[i].replace("\n", "").split(" ")))
xs = data[:, 0]
ys = data[:, 1]
mat = np.ndarray((m, 5))
f = np.full(shape=m, fill_value=-1.0, dtype=float)
for i in range(10):
mat[i, 0] = xs[i] ** 2
mat[i, 1] = xs[i] * ys[i]
mat[i, 2] = ys[i] ** 2
mat[i, 3] = xs[i]
mat[i, 4] = ys[i]
sol = Qr.solve(mat, f)
res = f - np.matmul(mat, sol)
print(
f"{Col.INF} Given the system (A b):\n{np.column_stack((mat, f))}\n\n"
f"{Col.SOL} The obtained solution is:\n{sol}\n\n"
f"{Col.INF} Residue norm is: {np.linalg.norm(res)}"
)
# Find closest points on ellipse
def solve(x: float, y: float, coeff: np.ndarray) -> float:
"""Given x, returns the nearest y on the ellipse"""
a = coeff[2]
b = x * coeff[1] + coeff[4]
c = coeff[0] * x * x + coeff[3] * x + 1
sqt = math.sqrt(b * b - 4 * a * c)
y0 = (-b - sqt) / (2 * a)
y1 = (-b + sqt) / (2 * a)
if abs(y - y0) < abs(y - y1):
return y0
else:
return y1
yy = [solve(x, y, sol) for x, y in zip(xs, ys)]
close = zip(xs, ys, yy)
print(f"{Col.INF} The closest points are:\n[(x_i, y_i, yy_i)] = [")
for p in close:
print(f" {p},")
print("]\n")
|
aebe5d23d3d1080ce6795ba6e292ee4d973360a5
| 27,375 |
def drones_byDroneId_patch(droneId):
"""
Update the information on a specific drone
It is handler for PATCH /drones/<droneId>
"""
return handlers.drones_byDroneId_patchHandler(droneId)
|
93f66abc182ff4df3b3ad06bb8cdd38d19ab8e01
| 27,376 |
def get_categories(categories_file):
""" Group categories by image
"""
# map each category id to its name
id_to_category = {}
for category in categories_file['categories']:
id_to_category[category['id']] = category['name']
image_categories = {}
for category in categories_file['annotations']:
if category['image_id'] not in image_categories:
image_categories[category['image_id']] = []
if id_to_category[category['category_id']] not in image_categories[category['image_id']]:
image_categories[category['image_id']].append(id_to_category[category['category_id']])
return image_categories
|
10377ea688c2e33195f137cc9470cadd6eb2b9e7
| 27,378 |
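# Minimal usage sketch for get_categories above, using a made-up COCO-style dict.
categories_file = {
    "categories": [{"id": 1, "name": "person"}, {"id": 2, "name": "dog"}],
    "annotations": [
        {"image_id": 10, "category_id": 1},
        {"image_id": 10, "category_id": 2},
        {"image_id": 10, "category_id": 1},  # duplicate category is ignored
    ],
}
assert get_categories(categories_file) == {10: ["person", "dog"]}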
from typing import Callable
import datetime
def get_profit_forecast(code: str,
getter: Callable[[str], pd.DataFrame] = rdb.get_profit_forecast):
""" 获取分析师的盈利预期
"""
today = datetime.datetime.now().strftime('%Y-%m-%d')
return getter(today).loc[code].to_dict()
|
1e2499eb785e1e4e50b6d82a2c3a4eea7b297a0d
| 27,379 |
import functools
def checkpoint_wrapper(module: nn.Module, offload_to_cpu: bool = False) -> nn.Module:
"""
A friendlier wrapper for performing activation checkpointing.
Compared to the PyTorch version, this version:
- wraps an nn.Module, so that all subsequent calls will use checkpointing
- handles keyword arguments in the forward
- handles non-Tensor outputs from the forward
- supports offloading activations to CPU
Usage::
checkpointed_module = checkpoint_wrapper(my_module, offload_to_cpu=True)
a, b = checkpointed_module(x, y=3, z=torch.Tensor([1]))
Args:
module (nn.Module): module to wrap
offload_to_cpu (Optional, bool): whether to offload activations to CPU
"""
module.forward = functools.partial(_checkpointed_forward, module.forward, offload_to_cpu) # type: ignore
return module
|
63d8f20265d2ade29e35ad5ae38b85e5a7f5f8af
| 27,380 |
def get_L_star_CS_d_t_i(L_CS_d_t_i, Q_star_trs_prt_d_t_i, region):
"""(9-2)(9-2)(9-3)
Args:
L_CS_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの冷房顕熱負荷(MJ/h)
Q_star_trs_prt_d_t_i: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の非居室への熱移動(MJ/h)
region: 地域区分
L_CS_d_t_i: returns: 日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の冷房顕熱負荷
Returns:
日付dの時刻tにおける暖冷房区画iの1時間当たりの熱損失を含む負荷バランス時の冷房顕熱負荷
"""
H, C, M = get_season_array_d_t(region)
L_CS_d_t_i = L_CS_d_t_i[:5]
f = L_CS_d_t_i > 0
Cf = np.logical_and(C, f)
L_star_CS_d_t_i = np.zeros((5, 24 * 365))
L_star_CS_d_t_i[Cf] = np.clip(L_CS_d_t_i[Cf] + Q_star_trs_prt_d_t_i[Cf], 0, None)
return L_star_CS_d_t_i
|
2d679123b10d4c5206253a9a7a4aadfb14fc77da
| 27,381 |
def delete(isamAppliance, name, check_mode=False, force=False):
"""
Delete an Authentication Mechanism
"""
ret_obj = search(isamAppliance, name, check_mode=check_mode, force=force)
mech_id = ret_obj['data']
if mech_id == {}:
logger.info("Authentication Mechanism {0} not found, skipping delete.".format(name))
else:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_delete(
"Delete an Authentication Mechanism",
"{0}/{1}".format(module_uri, mech_id),
requires_modules=requires_modules, requires_version=requires_version)
return isamAppliance.create_return_object()
|
4deb3c1362010d59abc39868f0b5fef8b4ddaa2f
| 27,382 |
def point_to_line_cluster_distance(point, line_cluster):
"""
Distance between a single point and a cluster of lines
"""
return val_point_to_line_cluster_distance(point.value, np.array([l.value for l in line_cluster]))
|
082ba543895e6bf25d013df6d94954de93468754
| 27,383 |
import time
def str_time_prop(start, end, date_format, prop):
"""Get a time at a proportion of a range of two formatted times.
start and end should be strings specifying times formated in the
given format (strftime-style), giving an interval [start, end].
prop specifies how a proportion of the interval to be taken after
start. The returned time will be in the specified format.
"""
stime = time.mktime(time.strptime(start, date_format))
etime = time.mktime(time.strptime(end, date_format))
ptime = stime + prop * (etime - stime)
return time.strftime(date_format, time.localtime(ptime))
|
6ce9a7ec5afd41df43ce029ed7391150f42b8d8a
| 27,384 |
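# Minimal usage sketch for str_time_prop above: prop=0.5 lands roughly halfway
# through the interval (the exact day depends on the local timezone/DST).
mid = str_time_prop("2020-01-01", "2020-12-31", "%Y-%m-%d", 0.5)
print(mid)  # e.g. "2020-07-01"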
def parse_fields(flds):
"""Parse Data Dictionary XML Fields
Arguments:
flds -- XML document element for fields
"""
fields_dict = dict()
for elem in flds:
elem_tag = str(elem.tag).strip()
if elem_tag.lower() != FIELD_TAG.lower():
raise ValueError(elem_tag + " element tag is not equal to " + FIELD_TAG)
elem_dict = dict()
elem_dict['type'] = str(elem.attrib['type']).upper()
elem_dict['number'] = int(elem.attrib['number'])
elem_dict['description'] = elem.attrib['description'] if 'description' in elem.attrib else ''
elem_values = dict()
for elem_value in elem:
elem_value_tag = str(elem_value.tag).strip()
if elem_value_tag.lower() != FIELD_VALUE_TAG.lower():
raise ValueError(elem_value_tag + " element value tag is not equal to " + FIELD_VALUE_TAG)
elem_value_enum = str(elem_value.attrib['enum']).strip()
elem_value_desc = str(elem_value.attrib['description']).strip()
elem_values[elem_value_enum] = elem_value_desc
if elem_values:
elem_dict['values'] = elem_values
fields_dict[str(elem.attrib['name']).strip()] = elem_dict
return fields_dict
|
c3057f2dc3ff525564589024ec75a58bab0a3331
| 27,385 |
import json
def analyse_json_data(input_file_path):
"""
:type input_file_path: str
:rtype: InputData
:raise InputFileNotFoundError: if the input file cannot be found
"""
data = InputData()
try:
with open(input_file_path, 'r') as input_file:
json_data = json.load(input_file)
except FileNotFoundError as e:
raise InputFileNotFoundError(e)
number_of_aggregated_packets = 0
previous_time_received_in_microseconds = 0
for time_received_in_microseconds in \
[time_in_nanoseconds / 1000 for time_in_nanoseconds in json_data['timesReceivedInNanoseconds']]:
delta_time = time_received_in_microseconds - previous_time_received_in_microseconds
if delta_time > 400:
if number_of_aggregated_packets != 0:
data.add_big_packet(number_of_aggregated_packets)
number_of_aggregated_packets = 0
number_of_aggregated_packets += 1
previous_time_received_in_microseconds = time_received_in_microseconds
if number_of_aggregated_packets != 0:
data.add_big_packet(number_of_aggregated_packets)
return data
|
fe4eef01d2fe14f920a1bf64687410ef9d8e13d9
| 27,386 |
import math
def process_datastore_tweets(project, dataset, pipeline_options):
"""Creates a pipeline that reads tweets from Cloud Datastore from the last
N days. The pipeline finds the top most-used words, the top most-tweeted
URLs, and ranks word co-occurrences by an 'interestingness' metric (similar
to tf-idf).
"""
user_options = pipeline_options.view_as(UserOptions)
DAYS = 4
p = beam.Pipeline(options=pipeline_options)
# Read entities from Cloud Datastore into a PCollection, then filter to get
# only the entities from the last DAYS days.
lines = (p | QueryDatastore(project, DAYS)
| beam.ParDo(FilterDate(user_options, DAYS))
)
global_count = AsSingleton(
lines
| 'global count' >> beam.combiners.Count.Globally())
# Count the occurrences of each word.
percents = (lines
| 'split' >> (beam.ParDo(WordExtractingDoFn())
.with_output_types(unicode))
| 'pair_with_one' >> beam.Map(lambda x: (x, 1))
| 'group' >> beam.GroupByKey()
| 'count' >> beam.Map(lambda (word, ones): (word, sum(ones)))
| 'in tweets percent' >> beam.Map(
lambda (word, wsum), gc: (word, float(wsum) / gc), global_count))
top_percents = (percents
| 'top 500' >> combiners.Top.Of(500, lambda x, y: x[1] < y[1])
)
# Count the occurrences of each expanded url in the tweets
url_counts = (lines
| 'geturls' >> (beam.ParDo(URLExtractingDoFn())
.with_output_types(unicode))
| 'urls_pair_with_one' >> beam.Map(lambda x: (x, 1))
| 'urls_group' >> beam.GroupByKey()
| 'urls_count' >> beam.Map(lambda (word, ones): (word, sum(ones)))
| 'urls top 300' >> combiners.Top.Of(300, lambda x, y: x[1] < y[1])
)
# Define some inline helper functions.
def join_cinfo(cooccur, percents):
"""Calculate a co-occurence ranking."""
word1 = cooccur[0][0]
word2 = cooccur[0][1]
try:
word1_percent = percents[word1]
weight1 = 1 / word1_percent
word2_percent = percents[word2]
weight2 = 1 / word2_percent
return (cooccur[0], cooccur[1], cooccur[1] *
math.log(min(weight1, weight2)))
except:
return 0
def generate_cooccur_schema():
"""BigQuery schema for the word co-occurrence table."""
json_str = json.dumps({'fields': [
{'name': 'w1', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'w2', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},
{'name': 'log_weight', 'type': 'FLOAT', 'mode': 'NULLABLE'},
{'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})
# {'name': 'ts', 'type': 'STRING', 'mode': 'NULLABLE'}]})
return parse_table_schema_from_json(json_str)
def generate_url_schema():
"""BigQuery schema for the urls count table."""
json_str = json.dumps({'fields': [
{'name': 'url', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},
{'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})
# {'name': 'ts', 'type': 'STRING', 'mode': 'NULLABLE'}]})
return parse_table_schema_from_json(json_str)
def generate_wc_schema():
"""BigQuery schema for the word count table."""
json_str = json.dumps({'fields': [
{'name': 'word', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'percent', 'type': 'FLOAT', 'mode': 'NULLABLE'},
{'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})
# {'name': 'ts', 'type': 'STRING', 'mode': 'NULLABLE'}]})
return parse_table_schema_from_json(json_str)
# Now build the rest of the pipeline.
# Calculate the word co-occurence scores.
cooccur_rankings = (lines
| 'getcooccur' >> (beam.ParDo(CoOccurExtractingDoFn()))
| 'co_pair_with_one' >> beam.Map(lambda x: (x, 1))
| 'co_group' >> beam.GroupByKey()
| 'co_count' >> beam.Map(lambda (wordts, ones): (wordts, sum(ones)))
| 'weights' >> beam.Map(join_cinfo, AsDict(percents))
| 'co top 300' >> combiners.Top.Of(300, lambda x, y: x[2] < y[2])
)
# Format the counts into a PCollection of strings.
wc_records = top_percents | 'format' >> beam.FlatMap(
lambda x: [{'word': xx[0], 'percent': xx[1],
'ts': user_options.timestamp.get()} for xx in x])
url_records = url_counts | 'urls_format' >> beam.FlatMap(
lambda x: [{'url': xx[0], 'count': xx[1],
'ts': user_options.timestamp.get()} for xx in x])
co_records = cooccur_rankings | 'co_format' >> beam.FlatMap(
lambda x: [{'w1': xx[0][0], 'w2': xx[0][1], 'count': xx[1],
'log_weight': xx[2],
'ts': user_options.timestamp.get()} for xx in x])
# Write the results to three BigQuery tables.
wc_records | 'wc_write_bq' >> beam.io.Write(
beam.io.BigQuerySink(
'%s:%s.word_counts' % (project, dataset),
schema=generate_wc_schema(),
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND))
url_records | 'urls_write_bq' >> beam.io.Write(
beam.io.BigQuerySink(
'%s:%s.urls' % (project, dataset),
schema=generate_url_schema(),
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND))
co_records | 'co_write_bq' >> beam.io.Write(
beam.io.BigQuerySink(
'%s:%s.word_cooccur' % (project, dataset),
schema=generate_cooccur_schema(),
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND))
# Actually run the pipeline.
return p.run()
|
db60b67d85f2d703e8cf46f5d89d56d8c9cfcbed
| 27,387 |
def unpack_uid(uid):
"""
Convert packed PFile UID to standard DICOM UID.
Parameters
----------
uid : str
packed PFile UID as a string
Returns
-------
uid : str
unpacked PFile UID as string
"""
return ''.join([str(i-1) if i < 11 else '.' for pair in [(ord(c) >> 4, ord(c) & 15) for c in uid] for i in pair if i > 0])
|
cb131f3df386c40382cf70ddee5125f901de5fa8
| 27,388 |
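# Worked example for unpack_uid above. As the implementation implies, each character
# packs two 4-bit values: value n in 1..10 encodes digit n-1, 11 encodes '.', and 0 is
# padding. chr(0x2B) unpacks to ('1', '.') and chr(0x42) to ('3', '1').
assert unpack_uid(chr(0x2B) + chr(0x42)) == "1.31"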
def command(name=None):
"""A decorator to register a subcommand with the global `Subcommands` instance.
"""
def decorator(f):
_commands.append((name, f))
return f
return decorator
|
89dd62a97ce6af317dbb2d33273fa215d740deab
| 27,389 |
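# Minimal usage sketch for the command decorator above (relies on the module-level
# _commands list referenced in the implementation; the command names are made up).
@command("greet")
def greet_cmd(args):
    print("hello")

@command()  # name defaults to None
def version_cmd(args):
    print("1.0")

# _commands now holds [("greet", greet_cmd), (None, version_cmd)]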
import numpy as np
def agse_convert(invec, swdata, insys='gse'):
"""Convert between GSE and aberrated GSE
Using the common definition, e.g., Schwartz 1998
(S.J. Schwartz, "Shock and Discontinuity Normals, Mach Numbers, and Related Parameters",
In: Analysis Methods for Multi-Spacecraft Data, Eds.: G. Paschmann and P. Daly,
ISSI Scientific Reports Series, ESA/ISSI, Vol. 1, ISBN 1608-280X, 1998, pp.249-270)
Neglects transverse components of SW velocity
"""
assert insys in ('gse', 'agse')
alpha = np.arctan(30/swdata['Plasma_bulk_speed'])
gse_to_agse = np.zeros((3,3), dtype=float)
gse_to_agse[2, 2] = 1
gse_to_agse[0, 0] = np.cos(alpha)
gse_to_agse[1, 1] = np.cos(alpha)
gse_to_agse[0, 1] = -np.sin(alpha)
gse_to_agse[1, 0] = np.sin(alpha)
if insys == 'gse':
outvec = np.dot(gse_to_agse, invec)
else:
outvec = np.dot(gse_to_agse.T, invec)
return outvec
|
3d5d77565187e9242104d8b25260b86458109701
| 27,390 |
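# Minimal usage sketch for agse_convert above (assumes numpy; the solar-wind speed is
# in km/s, so the aberration angle here is atan(30/400), about 4.3 degrees).
import numpy as np

swdata = {'Plasma_bulk_speed': 400.0}
v_gse = np.array([1.0, 0.0, 0.0])
v_agse = agse_convert(v_gse, swdata, insys='gse')
assert np.allclose(agse_convert(v_agse, swdata, insys='agse'), v_gse)  # rotation round-trips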
import numpy as np
def parse_geom_text_output(out_lines, input_dict=None):
"""
Parse output of .geom file
:param out_lines: a list of lines from the readline function
:param input_dict: not in use at the moment
:return parsed_data: key, value of the trajectories of cell, atoms,
force etc
"""
txt = out_lines
Hartree = units['Eh']
Bohr = units['a0']
# Yeah, we know that...
cell_list = []
species_list = []
geom_list = []
forces_list = []
energy_list = []
temperature_list = []
velocity_list = []
current_pos = []
current_species = []
current_forces = []
current_velocity = []
current_cell = []
in_header = False
for i, line in enumerate(txt):
if 'begin header' in line.lower():
in_header = True
continue
if 'end header' in line.lower():
in_header = False
continue
if in_header:
continue # Skip header lines
sline = line.split()
if '<-- E' in line:
energy_list.append(float(sline[0]) * Hartree)
continue
elif '<-- h' in line:
current_cell.append(list(map(float, sline[:3])))
continue
elif '<-- R' in line:
current_pos.append(list(map(float, sline[2:5])))
current_species.append(sline[0])
elif '<-- F' in line:
current_forces.append(list(map(float, sline[2:5])))
elif '<-- V' in line:
current_velocity.append(list(map(float, sline[2:5])))
elif '<-- T' in line:
temperature_list.append(float(sline[0]))
elif not line.strip() and current_cell:
cell_list.append(current_cell)
species_list.append(current_species)
geom_list.append(current_pos)
forces_list.append(current_forces)
current_cell = []
current_species = []
current_pos = []
current_forces = []
if current_velocity:
velocity_list.append(current_velocity)
current_velocity = []
if len(species_list) == 0:
raise RuntimeError('No data found in geom file')
out = dict(
cells=np.array(cell_list) * Bohr,
positions=np.array(geom_list) * Bohr,
forces=np.array(forces_list) * Hartree / Bohr,
geom_energy=np.array(energy_list),
symbols=species_list[0],
)
if velocity_list:
out['velocities'] = np.array(velocity_list) * Bohr
return out
|
a01ba11130d91aa2211563322384e772dfe7ad1a
| 27,391 |
def evalKnapsackBalanced(individual):
"""
Variant of the original weight-value knapsack problem with an added third objective: minimizing the weight difference between adjacent items.
"""
weight, value = evalKnapsack(individual)
balance = 0.0
for a,b in zip(individual, list(individual)[1:]):
balance += abs(items[a][0]-items[b][0])
if len(individual) > MAX_ITEM or weight > MAX_WEIGHT:
return weight, value, 1e30 # Ensure overweighted bags are dominated
return weight, value, balance
|
2037e6b33d4cd8c76496b8fbb866febbf355aaac
| 27,392 |
def complexify_module(lines_in):
"""
Complexify a module by separating its derived types, functions, and subroutines
and passing them through a line complexification function.
Parameters
----------
lines_in : list of string
List of strings source code for one module to be complexified
Returns
-------
lines_out : list of string
List of strings representing the output complexified module
"""
N_line = len(lines_in)
iLine = 0
lines_out = []
# Start Module
lines_out.append(lines_in[0] + "use MOD_COMPLEXIFY\n")
iLine += 1
while iLine < N_line:
# Handle Derived Type
if not re_type_start.search(lines_in[iLine]) is None:
iSearch = 0
lines_type = []
# Group All Subroutine Lines
while re_type_end.search(lines_in[iLine + iSearch]) is None:
lines_type.append(lines_in[iLine + iSearch])
iSearch += 1
lines_type.append(lines_in[iLine + iSearch])
iSearch += 1
# Fix Function
lines_fixed = complexify_type(lines_type)
for line in lines_fixed:
lines_out.append(line)
iLine += iSearch
# Handle Function
elif not re_function_start.search(lines_in[iLine]) is None:
iSearch = 0
lines_function = []
# Group All Subroutine Lines
while re_function_end.search(lines_in[iLine + iSearch]) is None:
lines_function.append(lines_in[iLine + iSearch])
iSearch += 1
lines_function.append(lines_in[iLine + iSearch])
iSearch += 1
# Fix Function
lines_fixed = complexify_function(lines_function)
for line in lines_fixed:
lines_out.append(line)
iLine += iSearch
# Handle Subroutine
elif not re_subroutine_start.search(lines_in[iLine]) is None:
iSearch = 0
lines_subroutine = []
# Group All Subroutine Lines
while re_subroutine_end.search(lines_in[iLine + iSearch]) is None:
lines_subroutine.append(lines_in[iLine + iSearch])
iSearch += 1
lines_subroutine.append(lines_in[iLine + iSearch])
iSearch += 1
# Fix Subroutine
lines_fixed = complexify_subroutine(lines_subroutine)
for line in lines_fixed:
lines_out.append(line)
iLine += iSearch
# Write Line Unchanged
else:
lines_out.append(lines_in[iLine])
iLine += 1
return lines_out
|
dfa30e883c2addeb1e3e5ac0c84be4f7d7834277
| 27,394 |
import datetime
import pytz
def utc_right_now():
"""
Returns a datetime object reflecting the time in UTC as of when this function was called.
"""
return datetime.datetime.now(tz=pytz.utc).replace(tzinfo=None)
|
870eff5c5f744b8d3e84a21f00adc48bae088221
| 27,395 |
import numpy as np
def process_img_border(img_array, polygon_pts, border=6):
"""Crop the image to the building polygon's bounding box plus a border.
Args:
img_array (numpy array): numpy representation of image.
polygon_pts (array): corners of the building polygon.
border (int): number of pixels to pad the bounding box on each side.
Returns:
numpy array: the cropped image patch, clipped to the image bounds.
"""
height, width, _ = img_array.shape
xcoords = polygon_pts[:, 0]
ycoords = polygon_pts[:, 1]
xmin, xmax = np.min(xcoords), np.max(xcoords)
ymin, ymax = np.min(ycoords), np.max(ycoords)
xdiff = xmax - xmin
ydiff = ymax - ymin
#Extend image by scale percentage
xmin = max(int(xmin - border), 0)
xmax = min(int(xmax + border), width)
ymin = max(int(ymin - border), 0)
ymax = min(int(ymax + border), height)
(X,Y,Z)=img_array.shape
return img_array[ymin:ymax, xmin:xmax, :]
|
7ad98cc9e66e34849a534a42e3d78062e1b779c4
| 27,396 |
from typing import Optional
from typing import Mapping
def get_connection(arn: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConnectionResult:
"""
Provides details about CodeStar Connection.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.codestarconnections.get_connection(arn=aws_codestarconnections_connection["example"]["arn"])
```
:param str arn: The CodeStar Connection ARN.
:param Mapping[str, str] tags: Map of key-value resource tags to associate with the resource.
"""
__args__ = dict()
__args__['arn'] = arn
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:codestarconnections/getConnection:getConnection', __args__, opts=opts, typ=GetConnectionResult).value
return AwaitableGetConnectionResult(
arn=__ret__.arn,
connection_status=__ret__.connection_status,
id=__ret__.id,
name=__ret__.name,
provider_type=__ret__.provider_type,
tags=__ret__.tags)
|
76494278cfd1ec4adf3ecd90d8f5536f545a814f
| 27,398 |
from collections import Counter
def generate_samples(n_samples, func, *args, **kwargs):
"""Call a function a bunch of times and count the results.
Args:
n_samples: Number of time to call the function.
func: The function whose results are counted.
*args, **kwargs: The arguments to pass to func.
Returns:
Counter containing results.
"""
samples = Counter()
for _ in range(n_samples):
res = func(*args, **kwargs)
samples[res] += 1
return samples
|
625c2bf6713420e26704d2c2842504343be09434
| 27,399 |
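# Minimal usage sketch for generate_samples above: tally 1000 simulated die rolls.
import random

rolls = generate_samples(1000, random.randint, 1, 6)
print(rolls.most_common(3))  # e.g. [(4, 178), (2, 170), (6, 169)]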