code
stringlengths
4
4.48k
docstring
stringlengths
1
6.45k
_id
stringlengths
24
24
def mf_update(self, state_below, state_above, layer_above=None, double_weights=False, iter_name=None):
    """Compute one mean-field update for this layer.

    Parameters
    ----------
    state_below : tensor-like
        State of the layer below, in self.input_space.
    state_above : tensor-like or None
        State of the layer above; when given, a downward message is added.
    layer_above : Layer, optional
        Required when state_above is not None.
    double_weights : bool, optional
        If True, double the input from below before the dot product.
    iter_name : str, optional
        Suffix used to name intermediate Theano variables for debugging.

    Returns
    -------
    h : tensor-like
        Updated mean-field state, tanh(beta * z).
    """
    self.input_space.validate(state_below)
    if self.requires_reformat:
        if not isinstance(state_below, tuple):
            for sb in get_debug_values(state_below):
                if sb.shape[0] != self.dbm.batch_size:
                    # Bug fix: the original concatenated a plain literal with an
                    # already-%-formatted string; the two-element tuple was applied
                    # to a one-placeholder format, raising TypeError instead of the
                    # intended ValueError.  Format the full message at once.
                    raise ValueError("self.dbm.batch_size is %d but got "
                                     "shape of %d" %
                                     (self.dbm.batch_size, sb.shape[0]))
                assert reduce(operator.mul, sb.shape[1:]) == self.input_dim
        state_below = self.input_space.format_as(state_below, self.desired_space)
    if iter_name is None:
        iter_name = 'anon'
    if state_above is not None:
        assert layer_above is not None
        msg = layer_above.downward_message(state_above)
        msg.name = ('msg_from_' + layer_above.layer_name + '_to_' +
                    self.layer_name + '[' + iter_name + ']')
    else:
        msg = None
    if double_weights:
        state_below = 2. * state_below
        state_below.name = self.layer_name + '_' + iter_name + '_2state'
    z = T.dot(state_below, self.ising_weights()) + self.ising_b()
    if self.layer_name is not None and iter_name is not None:
        z.name = self.layer_name + '_' + iter_name + '_z'
    if msg is not None:
        z = z + msg
    h = T.tanh(self.beta * z)
    return h
.. todo:: WRITEME
625941b36fece00bbac2d4f2
def get_largest_kth(arr: list, k: int) -> list:
    """Return the k largest elements of arr, sorted in descending order.

    If arr has at most k elements it is returned unchanged (and unsorted),
    matching the original contract.

    arr: input list of comparable items.
    k: number of largest elements wanted.
    """
    if not arr:
        return []
    if len(arr) <= k:
        return arr
    # heapq.nlargest is O(n log k) and already yields the result in
    # descending order, replacing the manual "replace the minimum and
    # re-sort" loop that cost O(n * k log k).
    return heapq.nlargest(k, arr)
Given array and find largest K elements :return:
625941b38e05c05ec3eea128
def test_assign_gt(self):
    """Test genotype assignment with actual data.

    Runs genotype.assign_gt at three similarity thresholds and checks the
    outputs are internally consistent and ordered as expected (stricter
    thresholds assign fewer genotypes).  Uses real data files from
    ALIGNMENTS, so this test is slow (~25 seconds).
    """
    # Sanity check: the alignment fixture must exist on disk.
    self.assertTrue(os.path.isfile(ALIGNMENTS[2]))
    alignall = ALIGNMENTS[2]
    test_refgts = genotype.getrefseqs(ALIGNMENTS[0], ALIGNMENTS[2])
    # A "normal" threshold plus one laxer and one stricter value.
    threshold_norm = 150
    threshold_lax = 50
    threshold_strict = 200
    assigngt_norm = genotype.assign_gt(alignall, test_refgts, threshold_norm)
    # The record list at index 2 must match the count at index 1 --
    # presumably (genotypes, count, records); TODO confirm the tuple
    # layout against genotype.assign_gt.
    self.assertTrue(len(assigngt_norm[2]) == assigngt_norm[1])
    assigngt_lax = genotype.assign_gt(alignall, test_refgts, threshold_lax)
    self.assertTrue(len(assigngt_lax[2]) == assigngt_lax[1])
    assigngt_strict = genotype.assign_gt(alignall, test_refgts, threshold_strict)
    self.assertTrue(len(assigngt_strict[2]) == assigngt_strict[1])
    # Stricter thresholds must assign strictly fewer genotypes.
    self.assertTrue(assigngt_strict[1] < assigngt_norm[1] < assigngt_lax[1])
    self.assertTrue(len(assigngt_strict[0]) < len(assigngt_norm[0]) < len(assigngt_lax[0]))
Test assigning genotypes with actual data. I have not created toy `.fasta` files for testing, so test on real data. There is a rough check for mistyping in the main code, so just make sure output is as expected. Overall, we only check if our genotypes are logical and make no claim that they are fully accurate. Check that if the threshold is changed, the number of new genotypes and mistyped genotypes changes as expected. This test is slow and takes ~25 seconds to run.
625941b3796e427e537b0379
def ReadStataDct(dct_file, **options):
    """Read a Stata dictionary file.

    dct_file: string filename
    options: dict of options passed to open()

    returns: FixedWidthVariables object
    """
    type_map = dict(byte=int, int=int, long=int, float=float,
                    double=float, numeric=int)
    var_info = []
    # Bug fix: the original iterated open(...) without ever closing the
    # handle; use a context manager for deterministic cleanup.
    with open(dct_file, **options) as fp:
        for line in fp:
            match = re.search(r'_column\(([^)]*)\)', line)
            if not match:
                continue
            start = int(match.group(1))
            t = line.split()
            vtype, name, fstring = t[1:4]
            name = name.lower()
            if vtype.startswith('str'):
                vtype = str
            else:
                vtype = type_map[vtype]
            long_desc = ' '.join(t[4:]).strip('"')
            var_info.append((start, vtype, name, fstring, long_desc))
    columns = ['start', 'type', 'name', 'fstring', 'desc']
    variables = pandas.DataFrame(var_info, columns=columns)
    # Each variable ends where the next starts; the last row gets a
    # sentinel end of 0 (interpreted downstream by FixedWidthVariables).
    variables['end'] = variables.start.shift(-1)
    variables.loc[len(variables) - 1, 'end'] = 0
    dct = FixedWidthVariables(variables, index_base=1)
    return dct
Reads a Stata dictionary file. dct_file: string filename options: dict of options passed to open() returns: FixedWidthVariables object
625941b35f7d997b87174853
def __init__(self, id, name, msisdn, status, market, tariff, customer, payer):
    """Subscriber entity constructor.

    Args:
        id (int): subscriber id
        name (str): subscriber name
        msisdn (str): MSISDN (phone number)
        status (str): status, active|deactive|suspend
        market (int): market id, 1|2|3 = GSM|DSL|FIX
        tariff (int): tariff id, e.g. 433|459|434|460
        customer (int): assigned customer id
        payer (int): assigned payer id
    """
    self.id = id
    self.name = name
    self.msisdn = msisdn
    self.status = status
    self.market = market
    self.tariff = tariff
    self.customer = customer
    self.payer = payer
Class constructor Subscriber entity Args: id (int): subscriber id name (str): name msisdn (str): MSISDN status (str): status, active|deactive|suspend market (int): market id, 1|2|3 GSM|DSL|FIX tariff (int): tariff id, 433|459|434|460 customer (int): assigned customer id payer (int): assigned payer id
625941b3a17c0f6771cbde10
def register_ioloop(self, callback: ReplierCallback):
    """Register a message-reception callback on Tornado's IOLoop.

    (Docstring translated from Japanese.)

    :param callback: invoked as ``callback(msg, None)`` when recv()
        succeeds, or ``callback(None, exc)`` when it raises.  A coroutine
        return value is scheduled on the running asyncio loop.
    """
    def call_callback_replier(*args):
        try:
            msg = self.recv()
        except Exception as exc:
            # Hand the exception to the callback instead of letting it
            # propagate and kill the IOLoop handler.
            rv = callback(None, exc)
        else:
            rv = callback(msg, None)
        if asyncio.iscoroutine(rv):
            # Coroutine callbacks are scheduled, not awaited here.
            asyncio.ensure_future(rv)
    IOLoop.current().add_handler(self, call_callback_replier, IOLoop.READ)
TornadoのIOLoopにメッセージ受信時のコールバックを登録. :param Callable callback:
625941b326068e7796caea90
def generate_phrase_scores(self, vid_embeds, vid_masks, phrase_embeds, phrase_masks):
    """Score every (video, phrase-set) pair by attention-weighted frame similarity.

    Args:
        vid_embeds: (batch_vids, num_frames, dim_embed) tensor.
        vid_masks: (batch_vids, num_frames); 0 marks padded frames.
        phrase_embeds: (batch_phrases, num_phrases, dim_embed) tensor.
        phrase_masks: (batch_phrases, num_phrases); 0 marks padded phrases.

    Returns:
        phrase_scores: (batch_vids, batch_phrases) mean score over valid phrases.
    """
    batch_vids, num_frames, _ = vid_embeds.size()
    # (batch_vids, 1, num_frames, 1) boolean mask of padded frames.
    vid_pad_masks = (vid_masks == 0).unsqueeze(1).unsqueeze(3)
    batch_phrases, num_phrases, dim_embed = phrase_embeds.size()
    # Flatten both sides to 2-D so a single cosine-similarity matrix covers
    # every (frame, phrase) pair, then reshape and move the phrase batch
    # next to the video batch:
    # ground_sims: (batch_vids, batch_phrases, num_frames, num_phrases).
    vid_2d_embeds = vid_embeds.view(-1, dim_embed)
    phrase_2d_embeds = phrase_embeds.view(-1, dim_embed)
    ground_sims = cosine_sim(vid_2d_embeds, phrase_2d_embeds).view( batch_vids, num_frames, batch_phrases, num_phrases).transpose(1, 2)
    # Zero padded frames and clamp negative similarities, then l2-normalize
    # over the frame axis before the temperature softmax.
    vid_attn_per_word = ground_sims.masked_fill(vid_pad_masks, 0)
    vid_attn_per_word[vid_attn_per_word < 0] = 0
    vid_attn_per_word = framework.ops.l2norm(vid_attn_per_word, dim=2)
    # Padded frames get -1e18 so softmax assigns them ~zero weight.
    vid_attn_per_word = vid_attn_per_word.masked_fill(vid_pad_masks, -1e18)
    vid_attn_per_word = torch.softmax(self.config.simattn_sigma * vid_attn_per_word, dim=2)
    if self.config.attn_fusion == 'embed':
        # Fuse at the embedding level: attend over frames, then compare the
        # attended video embedding against each phrase embedding.
        vid_attned_embeds = torch.einsum('abcd,ace->abde', vid_attn_per_word, vid_embeds)
        word_attn_sims = torch.einsum('abde,bde->abd', framework.ops.l2norm(vid_attned_embeds), framework.ops.l2norm(phrase_embeds))
    elif self.config.attn_fusion == 'sim':
        # Fuse at the similarity level: attention-weighted sum of the raw
        # frame similarities.  NOTE(review): any other attn_fusion value
        # leaves word_attn_sims undefined (NameError below) -- confirm the
        # config is validated upstream.
        word_attn_sims = torch.sum(ground_sims * vid_attn_per_word, dim=2)
    # Average over valid phrases only; clamp(min=1) avoids divide-by-zero.
    phrase_scores = torch.sum(word_attn_sims * phrase_masks.float().unsqueeze(0), 2) / torch.sum(phrase_masks, 1).float().unsqueeze(0).clamp(min=1)
    return phrase_scores
Args: - vid_embeds: (batch, num_frames, embed_size) - vid_masks: (batch, num_frames) - phrase_embeds: (batch, num_phrases, embed_size) - phrase_masks: (batch, num_phrases)
625941b3091ae35668666d1e
def report_evaluation(self, sect, stats, previous_stats):
    """Make the global evaluation report.

    Evaluates self.config.evaluation (a Python expression over self.stats)
    to produce the global note, stores it in stats['global_note'], and
    appends a summary Text node to sect.

    Raises:
        EmptyReport: when no statement was analysed.
    """
    if stats['statement'] == 0:
        raise EmptyReport()
    evaluation = self.config.evaluation
    try:
        # NOTE(review): eval() of a configuration-supplied expression --
        # acceptable for trusted config, but never feed it untrusted input.
        note = eval(evaluation, {}, self.stats)
    except Exception as ex:
        msg = 'An exception occurred while rating: %s' % ex
    else:
        stats['global_note'] = note
        msg = 'Your code has been rated at %.2f/10' % note
        if 'global_note' in previous_stats:
            msg += ' (previous run: %.2f/10)' % previous_stats['global_note']
        if self.config.comment:
            # NOTE(review): `config` here is a module-level name, not
            # self.config -- confirm the config module is imported.
            msg = '%s\n%s' % (msg, config.get_note_message(note))
    sect.append(Text(msg))
make the global evaluation report
625941b38da39b475bd64d2e
def smooth_and_interp_trajs(trajs, filter_width=3, num=100):
    """Smooth and resample a set of stroke trajectories.

    trajs: iterable of stroke trajectories (e.g. one letter instance made
        up of multiple strokes).
    filter_width: smoothing filter width passed to the per-stroke helper.
    num: number of interpolated samples per stroke.

    Returns a list with one processed trajectory per input stroke.
    """
    # Normalize the whole letter first, then smooth/interp each stroke.
    normalized_trajs = normalize_trajs_helper(trajs)
    processed_trajs = [smooth_and_interp_trajs_helper(stroke, filter_width=filter_width, num=num) for stroke in normalized_trajs]
    return processed_trajs
a helper to smooth and interp with specified number of samples given a letter instance which is possibly consisted of multiple stroke trajectories
625941b3be8e80087fb20a07
def __str__ ( self ):
    """Convert to a string such as "[04h 40m 5.170s]"."""
    # Split the decimal hour value into (hours, minutes, seconds).
    mix = dmsUnits.singleToMix ( self.hours )
    # Zero-padded components, seconds with 3 decimal places.
    values = dmsUnits.format ( mix, decimals=3, lz=True )
    return "[%sh %sm %ss]" % tuple(values)
Convert to a string such as "[04h 40m 5.170s]".
625941b3de87d2750b85fb45
def update_row(self, row, cols, value):
    """Update a row in the matrix.

    Existing entries for the row are removed first; if the row does not
    exist it will be added.

    row: row key.
    cols: iterable of column keys to set.
    value: a scalar (applied to every column), a 1-D array with one entry
        per column, or a 2-D array whose columns align with ``cols``.
    """
    self._del_row(row)
    v = np.array(value)
    for j, col in enumerate(cols):
        # Bug fix: np.isscalar(np.array(x)) is always False, so the scalar
        # branch was unreachable and scalar input hit the ValueError.  A
        # 0-d array is the scalar case; v[()] extracts the scalar.
        if v.ndim == 0:
            self.set_value(row, col, v[()])
        elif v.ndim == 1:
            self.set_value(row, col, v[j])
        elif v.ndim == 2:
            self.set_value(row, col, v[:, j])
        else:
            raise ValueError("Inconsistent data array provided.")
Update a row in the matrix. If the row does not exist it will be added. Existing entries for the row will be removed first.
625941b37d847024c06be078
def forward(self, xpad, ilens):
    """BLSTMP forward pass.

    :param xpad: padded input batch, shape (batch, time, feature)
    :param ilens: true sequence lengths per batch element
    :return: (projected hidden states, updated lengths)
    """
    for layer in six.moves.range(self.elayers):
        # Pack so the LSTM skips padded frames.
        xpack = pack_padded_sequence(xpad, ilens, batch_first=True)
        bilstm = getattr(self, 'bilstm' + str(layer))
        bilstm.flatten_parameters()
        ys, (hy, cy) = bilstm(xpack)
        ypad, ilens = pad_packed_sequence(ys, batch_first=True)
        # Optional temporal subsampling between layers.
        sub = self.subsample[layer + 1]
        if sub > 1:
            ypad = ypad[:, ::sub]
            ilens = [int(i + 1) // sub for i in ilens]
        # Framewise linear projection ('bt<layer>') followed by tanh.
        projected = getattr(self, 'bt' + str(layer) )(ypad.contiguous().view(-1, ypad.size(2)))
        xpad = torch.tanh(projected.view(ypad.size(0), ypad.size(1), -1))
        # Hidden/cell states are unused; free them eagerly.
        del hy, cy
    return xpad, ilens
BLSTMP forward :param xs: :param ilens: :return:
625941b3b545ff76a8913bd9
def __init__(self, call, *args_to_append):
    """Constructor.

    call: the wrapped call, forwarded to the Tweak base class.
    args_to_append: extra arguments appended to the call via self.after().
    """
    Tweak.__init__(self, call)
    self.after(*args_to_append)
Constructor.
625941b3e64d504609d745f8
def get_volume_type(self, volume_id):
    """Returns the details of a single volume_type."""
    resp, raw_body = self.get("types/%s" % str(volume_id))
    parsed = json.loads(raw_body)
    self.expected_success(200, resp.status)
    return resp, parsed['volume_type']
Returns the details of a single volume_type.
625941b30fa83653e4656d7d
@commandWrap
def PlaybackToggle(*args, **kwargs):
    """Thin wrapper around cmds.PlaybackToggle.

    :rtype: list|str|DagNode|AttrObject|ArrayAttrObject|Components1Base
    """
    return cmds.PlaybackToggle(*args, **kwargs)
:rtype: list|str|DagNode|AttrObject|ArrayAttrObject|Components1Base
625941b363b5f9789fde6e9d
def __init__(self, test_helpers, max_len, num_workers):
    """Start a pool of worker processes that build reviews from DataMgrs.

    test_helpers: a list of DataMgr objects.
    max_len: approximate max length (words/ids) of the merged review.
    num_workers: number of worker processes (set to max num cpus).

    Callers must call close() to shut the workers down.
    """
    self.test_helpers = test_helpers
    self.max_len = max_len
    self.num_workers = num_workers
    print ('Meta Review Helper Initialized with ', len(test_helpers), ' Data Helpers')
    self.processes = []
    # Work is fed through inQueue; results come back on outQueue.
    self.inQueue = multiprocessing.Queue()
    self.outQueue = multiprocessing.Queue()
    for _ in range(self.num_workers):
        p = multiprocessing.Process(target=self.rev_from_dmgr, args=(self.inQueue, self.outQueue))
        p.start()
        self.processes.append(p)
    print ('multi processor ready with ', len(self.processes), ' worker threads')
    print ("don't forget to call close")
test helpers: a list of DataMgr objs max_len: approx max len (words/ids) of the merged review num_workers: set to max num cpus
625941b31f5feb6acb0c4915
def cast(*args):
    """cast(itkLightObject obj) -> itkCompose3DCovariantVectorImageFilterIF3ICVF33_Superclass

    SWIG-generated downcast helper; delegates to the wrapped C++ cast.
    """
    return _itkCompose3DCovariantVectorImageFilterPython.itkCompose3DCovariantVectorImageFilterIF3ICVF33_Superclass_cast(*args)
cast(itkLightObject obj) -> itkCompose3DCovariantVectorImageFilterIF3ICVF33_Superclass
625941b3ec188e330fd5a560
def __init__( self, id: str, name: str, imctools_version: str, created: datetime, metadata: Optional[Dict[str, Any]] = None, ):
    """Session container.

    Parameters
    ----------
    id
        Unique ID of the session (UUID).
    name
        Session name.
    imctools_version
        Version of the imctools library used for conversion.
    created
        Datetime of session creation.
    metadata
        Whole set of original (raw) metadata as a dictionary.
    """
    self.id = id
    self.name = name
    self.imctools_version = imctools_version
    self.created = created
    self.metadata = metadata
    # Child objects keyed by integer id; populated after parsing.
    self.slides: Dict[int, Slide] = dict()
    self.acquisitions: Dict[int, Acquisition] = dict()
    self.panoramas: Dict[int, Panorama] = dict()
    self.channels: Dict[int, Channel] = dict()
Parameters ---------- id Unique ID of the session (UUID). name Session name. imctools_version Version of imctools library used for conversion. created Datetime of session creation. metadata Whole set of original (raw) metadata as a dictionary.
625941b3d18da76e23532288
def setListePions(c,listePions):
    """Place the given list of pawns on the card.

    (Translated from French.)

    Parameters:
        c: a card (dict-like).
        listePions: the list of pawns to place.

    Returns nothing; mutates the card in place.
    """
    c["pions"]=listePions
place la liste des pions passées en paramètre sur la carte paramètres: c: est une carte listePions: la liste des pions à poser Cette fonction ne retourne rien mais modifie la carte
625941b3004d5f362079a0f0
def setugid(user):
    """Change process user and group ID.

    Argument is a numeric user id or a user name.
    """
    try:
        # Try a numeric uid first; fall back to a username lookup.
        from pwd import getpwuid
        passwd = getpwuid(int(user))
    except ValueError:
        from pwd import getpwnam
        passwd = getpwnam(user)
    if hasattr(os, 'initgroups'):
        # Native call where available.
        os.initgroups(passwd.pw_name, passwd.pw_gid)
    else:
        # Fall back to the C library's initgroups(3) via ctypes.
        import ctypes
        if ctypes.CDLL(None).initgroups(passwd.pw_name, passwd.pw_gid) < 0:
            err = ctypes.c_int.in_dll(ctypes.pythonapi,"errno").value
            raise OSError(err, os.strerror(err), 'initgroups')
    # Drop the group before the user; after setuid we would no longer
    # have the privilege to change groups.
    os.setgid(passwd.pw_gid)
    os.setuid(passwd.pw_uid)
    os.environ['HOME'] = passwd.pw_dir
Change process user and group ID Argument is a numeric user id or a user name
625941b39b70327d1c4e0b8c
def pass_time(self, faller: Faller) -> None:
    """Advance the game field by one time step.

    Resolves match/drop cascades, then -- depending on the faller's
    state -- freezes a landed faller, drops it one cell, or marks it as
    landed.
    """
    # Clear matches repeatedly: dropping cells can create new matches.
    while self.found_matches() == True:
        self.clear_matches()
        self.drop_air_cells()
        # NOTE(review): the loop is gated on found_matches() but this
        # recomputes via find_matches() -- confirm the two are the
        # intended cached-state vs. recompute pair.
        if self.find_matches() == True:
            self._found_matches = True
        else:
            self._found_matches = False
            faller.end_life()
    if faller.state() == "LANDED":
        # A landed faller becomes part of the field.
        faller.freeze()
        if self.find_matches() == True:
            self._found_matches = True
        else:
            faller.end_life()
    elif can_drop(faller.bottom_jewel(), self) == True:
        # Move the faller down one cell: erase, drop, redraw.
        self.clear_faller(faller)
        faller.drop()
        self.position_faller(faller)
    else:
        if faller.state() != "DEAD":
            faller.land()
Depending on the state of the faller and its cells, This function represents the passing of time and determines whether a Faller will simply drop or the Field will attempt to Find matches.
625941b330dc7b7665901723
def main():
    """Run administrative tasks (Django manage.py entry point)."""
    # Default settings module; an existing env value takes precedence.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_moodle.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc
    execute_from_command_line(sys.argv)
Run administrative tasks.
625941b37047854f462a11c6
def __init__(self, dg=None, dg_tag=None, tag_entry=None, entry_view=None, local_vars_configuration=None):
    """RfidOrigin - a model defined in OpenAPI.

    dg: always assigned (presumably validated by the `dg` property
        setter -- confirm in the generated property code).
    dg_tag, tag_entry, entry_view: optional fields, only assigned when
        not None so their setters' validation is skipped for absent values.
    local_vars_configuration: client Configuration used for validation.
    """
    if local_vars_configuration is None:
        local_vars_configuration = Configuration()
    self.local_vars_configuration = local_vars_configuration
    # Backing fields for the property setters.
    self._dg = None
    self._dg_tag = None
    self._tag_entry = None
    self._entry_view = None
    self.discriminator = None
    self.dg = dg
    if dg_tag is not None:
        self.dg_tag = dg_tag
    if tag_entry is not None:
        self.tag_entry = tag_entry
    if entry_view is not None:
        self.entry_view = entry_view
RfidOrigin - a model defined in OpenAPI
625941b38a349b6b435e7f34
def __init__(self, host, port, commander):
    """Listening TCP dispatcher.

    commander: an instance of a class that has a "command(self, command)"
        interface.
    """
    assert hasattr(commander, 'command')
    asyncore.dispatcher.__init__(self)
    self.commander = commander
    self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
    # SO_REUSEADDR: allow quick restarts on the same port.
    self.set_reuse_addr()
    self.bind((host, port))
    self.listen(5)
commander: an intsnace of class that has "command(self, command)" interface
625941b321bff66bcd68470e
def __init__(self, workspace=None, name=None, description=None, owner=None, uri=None, version=None, query=None, labels=None, **kwargs):
    """Data set entity.

    Args:
        workspace {str} -- Optional name of the workspace.
        name {str} -- Required name of the data set.
        description {str} -- Optional description of the data set.
        owner {str} -- Optional owner of the data set.
        uri {str} -- Required uri of the data set.
        version {str} -- Optional version tagged by the user.
        query {str} -- Optional query string for how to fetch this data
            set from a data source.
        labels {object} -- Optional string key value pairs for labels.

    Additional keyword arguments are saved as additional properties of
    this dataset.
    """
    self.workspace = workspace
    self.name = name
    self.description = description
    self.owner = owner
    self.uri = uri
    self.version = version
    self.query = query
    self.labels = labels
    # Bug fix: the documented contract promises extra keyword arguments
    # become properties of the dataset, but they were silently dropped.
    for key, value in kwargs.items():
        setattr(self, key, value)
    self._id = ""
    self._create_time = ""
Args: workspace {str} -- Optional name of the workspace. name {str} -- Required name of the data set. description {str} -- Optional description of the data set. owner {str} -- Optional owner of the data set. uri {str} -- Required uri of the data set. version {str} -- Optional version tagged by the user. query {str} -- Optional query string for how to fetch this data set from a data source. labels {object} -- Optional string key value pairs for labels. Additional keyword arguments are saved as additional properties of this dataset.
625941b3d18da76e23532289
def test_entity_batch_number_get(self):
    """Test case for entity_batch_number_get (EntityBatchNumber_GET).  # noqa: E501

    Generated placeholder; no assertions implemented yet.
    """
    pass
Test case for entity_batch_number_get EntityBatchNumber_GET # noqa: E501
625941b3046cf37aa974cb03
def is_connected(self, v, w):
    """Return True when nodes v and w belong to the same component."""
    component_v = self.id[v]
    component_w = self.id[w]
    return component_v == component_w
Are nodes v and w connected?
625941b3cc40096d6159570d
def display(self, pixels):
    """Display an image on the panel.

    :pixels iterable: list of pixel intensities, must have a length of
        400 x 300 items.
    """
    # Send the previous frame first -- presumably required by the
    # controller's differential refresh; confirm against the panel
    # datasheet.
    send_command(DATA_START_TRANSMISSION_1)
    send_data_list(self._old_buffer)
    buffer = list(self._buffer_from_pixels(pixels))
    send_command(DATA_START_TRANSMISSION_2)
    send_data_list(buffer)
    send_command(DISPLAY_REFRESH)
    # Keep the new frame so the next call can send it as "old".
    self._old_buffer = buffer
    self.wait_until_idle()
display an image :pixels iterable: list of pixel intensities, must have a length of 400 x 300 items
625941b399cbb53fe67929a0
def generateID():
    """Generate a unique, URL-safe ID for a user profile."""
    return secrets.token_urlsafe(32)
## Generates a unique ID for a user when creating a profile.
625941b376e4537e8c35142f
def make_symbolic_state(self, num_examples, theano_rng):
    """Return a symbolic sample of this layer's state.

    num_examples: number of rows in the sampled state.
    theano_rng: Theano RandomStreams used for sampling.

    Returns (p_sample, h_sample) from max_pool_channels applied to a
    default z consisting of just the broadcast biases.
    """
    # Older pickles may lack the attribute; default to a single copy.
    if not hasattr(self, 'copies'):
        self.copies = 1
    if self.copies != 1:
        raise NotImplementedError()
    # z defaults to the bias broadcast to (num_examples, detector_dim).
    default_z = T.alloc(self.b, num_examples, self.detector_layer_dim)
    p_exp, h_exp, p_sample, h_sample = max_pool_channels(z=default_z, pool_size=self.pool_size, theano_rng=theano_rng)
    assert h_sample.dtype == default_z.dtype
    return p_sample, h_sample
.. todo:: WRITEME
625941b3046cf37aa974cb04
@_expire(hour=8, tz=_UTC)
def otcSymbols(token="", version="stable", filter="", format="json"):
    """Return the OTC symbols that IEX Cloud supports for API calls.

    https://iexcloud.io/docs/api/#otc-symbols
    Data updates 8am, 9am, 12pm, 1pm UTC daily; the cache expires at 8 UTC.

    Args:
        token (str): Access token
        version (str): API version
        filter (str): filters: https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Returns:
        dict or DataFrame or list: result
    """
    return _get( "ref-data/otc/symbols", token=token, version=version, filter=filter, format=format, )
This call returns an array of OTC symbols that IEX Cloud supports for API calls. https://iexcloud.io/docs/api/#otc-symbols 8am, 9am, 12pm, 1pm UTC daily Args: token (str): Access token version (str): API version filter (str): filters: https://iexcloud.io/docs/api/#filter-results format (str): return format, defaults to json Returns: dict or DataFrame or list: result
625941b3c432627299f049fd
def findID(cine,cinemaArray):
    """Extract cinema IDs from the raw cinema list into cinemaArray.

    cine: raw cinema record strings, each expected to contain an
        "id='<value>'" field.
    cinemaArray: 2-D array whose [i][0] slots receive the extracted IDs;
        returned after mutation.
    """
    # Terminate each record's final quoted field.
    for x in range(0,12):
        cine[x] = cine[x]+"'"
    for x in range (0,12):
        currentCinema = cine[x]
        idString = currentCinema.find('id')
        endOfID = 0
        # Consume '' delimiters until past the id field's position.
        while endOfID < idString:
            endOfID = currentCinema.find("''")
            currentCinema = currentCinema.replace("''",'XX',1)
        idValue = currentCinema[idString+6:endOfID]
        # NOTE(review): with x starting at 0 this writes cinemaArray[-1]
        # (the last row) on the first iteration -- looks like an
        # off-by-one; confirm against the caller's indexing convention.
        cinemaArray[x-1][0] = idValue
    return cinemaArray
In this function it takes the raw cinema list as an input and takes out the relative cinema IDs from the list, putting them into an array.
625941b31b99ca400220a869
def test_error_function_source_is_correct(self):
    """Check the reported source location for an overload.

    The location must be the overload implementation source
    (numba/np/linalg.py), not the function source from the target
    library (numpy).
    """
    @njit
    def foo():
        # Invalid argument type: triggers a TypingError at compile time.
        np.linalg.svd("chars")
    with self.assertRaises(errors.TypingError) as raises:
        foo()
    excstr = str(raises.exception)
    self.assertIn(error_reasons['specific_error'].splitlines()[0], excstr)
    # os.path.join keeps the check portable across path separators.
    expected_file = os.path.join("numba", "np", "linalg.py")
    expected = f"Overload in function 'svd_impl': File: {expected_file}:"
    self.assertIn(expected.format(expected_file), excstr)
Checks that the reported source location for an overload is the overload implementation source, not the actual function source from the target library.
625941b31f037a2d8b945fb7
def clear(self):
    """Empty the inventory and reset its carried weight."""
    self.items = dict()
    self.weight = 0
Remove all items in the inventory.
625941b3f9cc0f698b1403bf
def srun_run(self):
    """Run each configured script on the allocated nodes via srun.

    Iterates the 'srun_run' entries of the task configuration.  Each
    entry has a 'script' path and an optional 'flags' mapping of
    flag -> value (value None means the flag takes no argument).
    """
    for run_conf in self.__task_conf['srun_run']:
        cmd = ["sh", str(run_conf['script'])]
        flags = None
        try:
            # Flatten {flag: value} into ["flag", "value", ...].
            flags = []
            for key, value in run_conf['flags'].items():
                flags.append(str(key))
                if value is not None:
                    flags.append(str(value))
        except KeyError:
            # Entry has no 'flags' key -- run with the empty flag list
            # assigned just above.
            pass
        self.run_popen_safe(command=self.compose_srun(cmd, self.__hosts_mpi, self.__hosts_total, flags), nodes=self.__hosts_mpi)
SRUN script run on each node via srun --nodelist derived from beefile's node list
625941b3fff4ab517eb2f1f1
def take_gun(self, gun):
    """Equip the given gun.  (Original docstring: Chinese for "take gun".)"""
    self.gun = gun
拿枪
625941b32ae34c7f2600ceeb
def _count_valid_files_in_directory(directory, white_list_formats, follow_links):
    """Count files with extension in `white_list_formats` contained in directory.

    # Arguments
        directory: absolute path to the directory containing files to be
            counted.
        white_list_formats: set of strings containing allowed extensions
            (without the leading dot).
        follow_links: boolean, whether os.walk follows symlinks.

    # Returns
        the count of files with extension in `white_list_formats`
        contained in the directory.
    """
    def _recursive_list(subpath):
        # Sort by directory path for deterministic traversal order.
        return sorted(os.walk(subpath, followlinks=follow_links), key=lambda x: x[0])
    samples = 0
    for _, _, files in _recursive_list(directory):
        for fname in files:
            lower = fname.lower()
            # Bug fix: the warning used to be inside the per-extension
            # loop, firing once per white-listed extension (in arbitrary
            # set order) for every .tiff file.  Warn once per file.
            if lower.endswith('.tiff'):
                warnings.warn('Using \'.tiff\' files with multiple bands will cause distortion. '
                              'Please verify your output.')
            # str.endswith accepts a tuple of suffixes.
            if lower.endswith(tuple('.' + extension for extension in white_list_formats)):
                samples += 1
    return samples
Count files with extension in `white_list_formats` contained in directory. # Arguments directory: absolute path to the directory containing files to be counted white_list_formats: set of strings containing allowed extensions for the files to be counted. follow_links: boolean. # Returns the count of files with extension in `white_list_formats` contained in the directory.
625941b321a7993f00bc7aa1
def pc_work_time_var(self):
    """pc_work_time_var(quadrature_demod_cf_sptr self) -> float

    SWIG wrapper delegating to the underlying C++ performance counter.
    """
    return _analog_swig.quadrature_demod_cf_sptr_pc_work_time_var(self)
pc_work_time_var(quadrature_demod_cf_sptr self) -> float
625941b3d58c6744b4257a1a
def writeCyml(node):
    """Generate CyML code from an ASG.

    Args:
        node (Node): Common Abstract Syntax Graph

    Returns:
        str: CyML code
    """
    p = CymlGenerator(node)
    p.visit(node)
    # The generator accumulates output fragments in p.result.
    code= ''.join(p.result)
    return code
Generate CyML code from an ASG Args: node (Node): Common Abstract Syntax Graph Returns: [str]: CyML code
625941b3ab23a570cc24ff40
def get_square(self, x, y):
    """Return the square at (x, y).

    Returns 0 for an empty square, 1 or 2 for a player's counter.  The
    board is stored row-major in self.squares with module-level WIDTH
    columns.
    """
    return self.squares[y * WIDTH + x]
Return 0 for an empty square, 1 or 2 for a player's counter
625941b394891a1f4081b861
def __unhandled_exception(self, exctype, excval, exctb):
    """Private method called to report an uncaught exception.

    @param exctype the type of the exception
    @param excval data about the exception
    @param exctb traceback for the exception
    """
    # Forward to the main debugger thread; True flags it as unhandled.
    self.mainThread.user_exception((exctype, excval, exctb), True)
Private method called to report an uncaught exception. @param exctype the type of the exception @param excval data about the exception @param exctb traceback for the exception
625941b37047854f462a11c7
def draw_figure(canvas, figure, loc=(0, 0)):
    """Draw a matplotlib figure onto a Tk canvas.

    loc: location of top-left corner of figure on canvas in pixels.
    Inspired by matplotlib source: lib/matplotlib/backends/backend_tkagg.py

    Returns the Tk PhotoImage -- the caller must keep a reference to it,
    otherwise Tk garbage-collects the image and the canvas goes blank.
    """
    figure_canvas_agg = FigureCanvasAgg(figure)
    figure_canvas_agg.draw()
    figure_x, figure_y, figure_w, figure_h = figure.bbox.bounds
    figure_w, figure_h = int(figure_w), int(figure_h)
    photo = Tk.PhotoImage(master=canvas, width=figure_w, height=figure_h)
    # create_image positions by center, so offset by half the size to put
    # the top-left corner at `loc`.
    canvas.create_image(loc[0] + figure_w/2, loc[1] + figure_h/2, image=photo)
    # NOTE(review): tkagg.blit was removed in newer matplotlib releases --
    # confirm the pinned matplotlib version still ships it.
    tkagg.blit(photo, figure_canvas_agg.get_renderer()._renderer, colormode=2)
    return photo
Draw a matplotlib figure onto a Tk canvas loc: location of top-left corner of figure on canvas in pixels. Inspired by matplotlib source: lib/matplotlib/backends/backend_tkagg.py
625941b3fff4ab517eb2f1f2
def adjust_for_complexity(self, n):
    """Build a least-squares design matrix with columns [1, log(n)].

    See https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html
    """
    sizes = np.asarray(n)
    intercept = np.ones(len(sizes))
    return np.column_stack((intercept, np.log(sizes)))
See Documentation here https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html
625941b3925a0f43d2549c2c
def Get(self, request, global_params=None):
    """Get an existing environment.

    Args:
        request: (ComposerProjectsLocationsEnvironmentsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
        (Environment) The response message.
    """
    config = self.GetMethodConfig('Get')
    return self._RunMethod( config, request, global_params=global_params)
Get an existing environment. Args: request: (ComposerProjectsLocationsEnvironmentsGetRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Environment) The response message.
625941b35166f23b2e1a4f11
def capture_cloud_and_transform(pc_topic, source_frame, target_frame):
    """Capture a point cloud and transform it into the target frame.

    :param pc_topic: ROS topic to read the cloud from.
    :type pc_topic: str
    :param source_frame: TF source frame.
    :type source_frame: str
    :param target_frame: TF target frame.
    :type target_frame: str
    :return: the transformed cloud.
    :rtype: pypcd.PointCloud
    """
    pcl_pc = capture_cloud(pc_topic)
    # Look up source->target and convert the TF pose to a 4x4 matrix.
    tf_msg = capture_tf_msg(source_frame, target_frame)
    cf2obj_tf_msg = tf_conversions.posemath.fromTf(tf_msg)
    cf2obj_tf_mat = tf_conversions.posemath.toMatrix(cf2obj_tf_msg)
    np_pc = pcl_pc.to_array()
    np_pc = transform_cloud(np_pc, cf2obj_tf_mat)
    # pypcd expects float32 point data.
    np_pc = np_pc.astype(np.float32)
    # NOTE(review): pypcd's class is usually spelled `PointCloud` --
    # confirm the `Pointcloud` attribute exists in the pinned version.
    pc = pypcd.Pointcloud.from_array(np_pc)
    return pc
:param pc_topic: :type pc_topic: str :param source_frame: :type source_frame: str :param target_frame: :type target_frame: str :return: :rtype: pypcd.PointCloud
625941b3fbf16365ca6f5f7c
@contextlib.contextmanager
def MockArbitraryBuffer( filetype ):
    """Used via the with statement, set up a single buffer with an arbitrary
    name and no contents.  Its filetype is set to the supplied filetype.
    """
    # One buffer, open in the list and current at the same time.
    current_buffer = VimBuffer( os.path.realpath( 'TEST_BUFFER' ), filetype = filetype )
    with MockVimBuffers( [ current_buffer ], [ current_buffer ] ):
        yield
Used via the with statement, set up a single buffer with an arbitrary name and no contents. Its filetype is set to the supplied filetype.
625941b35166f23b2e1a4f12
def Title(self):
    """Return the title.

    Used by Plone to render <title> correctly.
    """
    return self.title
Returns the title. The function is used by Plone to render <title> correctly.
625941b38a43f66fc4b53e24
def generate_job_definition(jjb_data):
    """Render the Jenkins job definition for the given JJB data.

    Returns a multi-line string produced by the Jinja2 template named
    after jjb_data['tester'], rendered with the data bound as `jjb`.
    """
    # Bug fix: the original fetched the template twice -- the first
    # `get_template(...)` result was immediately overwritten by the
    # environment lookup.  Load it once through the environment so
    # FunctionLoader / trim_blocks semantics apply.
    j2_env = Environment(loader=FunctionLoader(get_template), trim_blocks=True)
    template = j2_env.get_template(jjb_data['tester'])
    return template.render(jjb=jjb_data)
Returns a multi-line string which is the jenkins job definition based on the given arguments.
625941b3cdde0d52a9e52dee
def cmd_garoi(self, nick, args, admin):
    """Garo translator.  Usage: !garoi [expression].

    (Original docstring was Hungarian: "garoi fordító".)
    """
    # Ordered substitution table: longest patterns first so multi-letter
    # digraphs are consumed before their single-letter components.
    substitutions = [
        ('ddzs', 'CCS'), ('dzs', 'CS'), ('ccs', 'DDZS'), ('ddz', 'TSSZ'),
        ('ggy', 'TTY'), ('lly', 'JJ'), ('ssz', 'ZZ'), ('tty', 'GGY'),
        ('zzs', 'SS'), ('dj', 'gy'), ('sz', 'Z'), ('ch', 'cs'),
        ('ts', 'cs'), ('cs', 'DZS'), ('dz', 'TSZ'), ('gy', 'TY'),
        ('ly', 'J'), ('cc', 'TSSZ'), ('ty', 'GY'), ('zs', 'S'),
        ('jj', 'LLY'), ('zz', 'SSZ'), ('b', 'P'), ('c', 'TSZ'),
        ('d', 'T'), ('f', 'V'), ('g', 'K'), ('j', 'LY'), ('k', 'G'),
        ('p', 'B'), ('s', 'ZS'), ('s', 'ZZS'), ('t', 'D'), ('v', 'F'),
        ('z', 'SZ'), ('x', 'GZ')]
    if not args:
        return 'Mid agarz?'
    # Replacements are uppercased so later lowercase patterns cannot
    # re-match already-converted text; lowercase the result at the end.
    result = args.lower()
    for pattern, replacement in substitutions:
        result = result.replace(pattern, replacement)
    return result.lower()
garoi fordító. Használat: !garoi [kifejezés]
625941b3b57a9660fec33638
@click.group()
@click.option('--config', 'config_file', type=click.Path(exists=True))
@click.option('--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True)
@pass_config
def cli(config, config_file):
    """Loads config file.  This function is entry point.

    --config overrides the config path; --version prints the version
    eagerly (via print_version) and never reaches this body.
    """
    if config_file:
        config.file = config_file
    config.load()
Loads config file. This function is entry point.
625941b3d8ef3951e32432f7
def _decode_header(fp):
    """Return the decoded header of a pyc file as (magic, timestamp, size).

    pyc file structure varied from 2.x to 3.x and even between 3.x
    versions (a change happened at magic 3393 ==> 3.7b1).  pydis only
    supports 3.x; the source-size field exists only for >= 3.0 formats.

    Raises:
        unknownMagic: when the magic number is not recognised.
    """
    magic = _decode_magic(fp)
    if magic not in MAGIC_NUMBERS:
        raise unknownMagic('Magic number is not valid.')
    ts = _decode_timestamp_bytes(fp, magic)
    size = None
    # MAGIC_NUMBERS maps magic -> (version, ...); 3.0+ headers carry size.
    if MAGIC_NUMBERS[magic][0] >= 3.0:
        size = _decode_size_bytes(fp)
    return magic, ts, size
Returns the decoded header part of pyc file. pyc file structure varied from 2.x to 3.x or even between 3.x versions. At this point pydis only supporting 3.x versions. We see there is a change happened at 3393 ==> 3.7b1 version.
625941b3adb09d7d5db6c54d
def compareSwitches(self): <NEW_LINE> <INDENT> if self.first_science_tuple[0] is None: <NEW_LINE> <INDENT> switches_timetag = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> switches_timetag = self.obs[self.first_science_tuple[0]].switches <NEW_LINE> <DEDENT> if self.first_science_tuple[1] is None: <NEW_LINE> <INDENT> switches_accum = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> switches_accum = self.obs[self.first_science_tuple[1]].switches <NEW_LINE> <DEDENT> message_printed = False <NEW_LINE> for obs in self.obs: <NEW_LINE> <INDENT> obs.openTrailer() <NEW_LINE> if obs.info["obsmode"] == "TIME-TAG": <NEW_LINE> <INDENT> switches = switches_timetag <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> switches = switches_accum <NEW_LINE> <DEDENT> keys = sorted(switches) <NEW_LINE> for key in keys: <NEW_LINE> <INDENT> compare = switches[key].strip() <NEW_LINE> sw = obs.switches[key].strip() <NEW_LINE> if sw != compare: <NEW_LINE> <INDENT> if obs.exp_type == EXP_WAVECAL: <NEW_LINE> <INDENT> if key in ["wavecorr", "doppcorr", "helcorr", "fluxcorr", "tdscorr"]: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> <DEDENT> if not message_printed: <NEW_LINE> <INDENT> cosutil.printWarning( "Inconsistent calibration switches:") <NEW_LINE> message_printed = True <NEW_LINE> <DEDENT> if len(compare) == 0: <NEW_LINE> <INDENT> compare = "(blank)" <NEW_LINE> <DEDENT> if len(sw) == 0: <NEW_LINE> <INDENT> sw = "(blank)" <NEW_LINE> <DEDENT> cosutil.printMsg(obs.input + ": " + key + " = " + sw + " vs. " + compare) <NEW_LINE> <DEDENT> <DEDENT> obs.closeTrailer()
Compare switches. This function compares the values of the calibration switch keywords in the observation list. If there is any mismatch, a warning will be printed giving the values in the first science observation (can be different for time-tag vs accum) and in the current observation.
625941b36aa9bd52df036b5b
def map(self, f=None, f_vect=None): <NEW_LINE> <INDENT> assert (f is not None) ^ (f_vect is not None) <NEW_LINE> parent = self <NEW_LINE> class Transformed(Generator): <NEW_LINE> <INDENT> def generate(self, size): <NEW_LINE> <INDENT> samples = parent.generate(size=size) <NEW_LINE> if f_vect is not None: <NEW_LINE> <INDENT> return [i for i in f_vect(samples)] <NEW_LINE> <DEDENT> elif f is not None: <NEW_LINE> <INDENT> return [f(sample) for sample in samples] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return Transformed()
Creates a new generator that transforms the generated values with the provided function. # TODO: do we really need "non vectorialized" f and vectorialized f_vect? Have a look at numpy ufunc...
625941b34e696a04525c920e
def determine_log_level(): <NEW_LINE> <INDENT> from_env_vars = os.getenv('ACME_LOGLEVEL') <NEW_LINE> from_config = conf.get('logging', 'level') <NEW_LINE> from_default = DEFAULT_LEVEL_NAME <NEW_LINE> flag = any_of( from_config, from_env_vars, from_default, ) <NEW_LINE> found = re.search(r'(?P<name>debug|info|warning|error|critical|notset)', flag, re.IGNORECASE) <NEW_LINE> if not found: <NEW_LINE> <INDENT> return logging.DEBUG <NEW_LINE> <DEDENT> level_name = found.group('name').upper() <NEW_LINE> return getattr(logging, level_name, DEFAULT_LEVEL_NAME)
attempts to retrieve the log level from the environment variable ``ACME_LOGLEVEL``
625941b33539df3088e2e105
def lower(self): <NEW_LINE> <INDENT> return str(self).lower()
This exists solely for django-debug-toolbar compatibility. At some point I hope to investigate making SelectCommand a GQL string so that we don't need to do this hackery
625941b3e5267d203edcda5b
def rescale_positions(nodes): <NEW_LINE> <INDENT> max_height = max(n.height for n in nodes) <NEW_LINE> max_xpos = max(n.xpos for n in nodes) <NEW_LINE> min_xpos = min(n.xpos for n in nodes) <NEW_LINE> for node in nodes: <NEW_LINE> <INDENT> node.xpos = (node.xpos - min_xpos) / (max_xpos-min_xpos) <NEW_LINE> node.height = node.height / max_height
Rescales xpos and heights to 0,1 range
625941b3377c676e91271f6b
def _rescaleLinearly(targetVector,newLength): <NEW_LINE> <INDENT> assert(np.isscalar(newLength)) <NEW_LINE> x_old = np.linspace(0,1,len(targetVector)) <NEW_LINE> x_new = np.linspace(0,1,newLength) <NEW_LINE> interp = scipy.interpolate.interp1d(x_old,targetVector,fill_value="extrapolate") <NEW_LINE> return interp(x_new)
Take a vector with an undersirable number of data points (e.g. twist/beta matrix) and rescales it to the desired dimension, assumes equally spaced data points. targetVector: Vector of points that need to be rescaled to the appropriate number newLength: Desired number of points
625941b34d74a7450ccd3f7d
def click_documents_grid_save_changes_button(self): <NEW_LINE> <INDENT> is_clicked = None <NEW_LINE> try: <NEW_LINE> <INDENT> self.logger.info('Start: click documents grid save changes button') <NEW_LINE> self._bill_and_audit_page.click_documents_grid_save_changes_button() <NEW_LINE> is_clicked = True <NEW_LINE> <DEDENT> except WebDriverException as exp: <NEW_LINE> <INDENT> is_clicked = False <NEW_LINE> self.logger.error(exp.msg) <NEW_LINE> raise <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> self.logger.info('End: click documents grid save changes button') <NEW_LINE> return is_clicked
Returning click documents grid save changes button Implementing logging for click documents grid save changes button functionality :return: True/False
625941b376d4e153a657e8e9
def listdir(self, path: str) -> list: <NEW_LINE> <INDENT> file_objects = self.filesystem.listdir(Path(path)) <NEW_LINE> return [file_object.name for file_object in file_objects]
Return a list containing the names of the entries in the directory given by path. The list is in arbitrary order, and does not include the special entries '.' and '..' even if they are present in the directory.
625941b331939e2706e4cc2b
def testMissingEndOfScopeCommentWithOtherComment(self): <NEW_LINE> <INDENT> original = [ 'goog.scope(function() {', "}); // I don't belong here!", ] <NEW_LINE> expected = [ 'goog.scope(function() {', '}); // goog.scope', ] <NEW_LINE> self._AssertFixes(original, expected)
Tests handling an irrelevant comment at end of goog.scope.
625941b330bbd722463cbb7d
def __init__(self, linenum, indent): <NEW_LINE> <INDENT> self.linenum = linenum <NEW_LINE> self.indent = indent
:param linenum: The line number of the frame. :type linenum: int :param indent: The indentation level of the frame. :type indent: int
625941b360cbc95b062c6303
def data(f_train, D, dayfilter = None, dayfeature = True, counters = False): <NEW_LINE> <INDENT> device_ip_counter = {} <NEW_LINE> device_id_counter = {} <NEW_LINE> for t, row in enumerate(DictReader(f_train)): <NEW_LINE> <INDENT> ID = row['id'] <NEW_LINE> del row['id'] <NEW_LINE> y = 0. <NEW_LINE> if 'click' in row: <NEW_LINE> <INDENT> if row['click'] == '1': <NEW_LINE> <INDENT> y = 1. <NEW_LINE> <DEDENT> del row['click'] <NEW_LINE> <DEDENT> date = row['hour'][0:6] <NEW_LINE> row['hour'] = row['hour'][6:] <NEW_LINE> if dayfilter != None and not date in dayfilter: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if dayfeature: <NEW_LINE> <INDENT> row['wd'] = str(int(date) % 7) <NEW_LINE> row['wd_hour'] = "%s_%s" % (row['wd'], row['hour']) <NEW_LINE> <DEDENT> if counters: <NEW_LINE> <INDENT> d_ip = row['device_ip'] <NEW_LINE> d_id = row ["device_id"] <NEW_LINE> try: <NEW_LINE> <INDENT> device_ip_counter [d_ip] += 1 <NEW_LINE> device_id_counter [d_id] += 1 <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> device_ip_counter [d_ip] = 1 <NEW_LINE> device_id_counter [d_id] = 1 <NEW_LINE> <DEDENT> row["ipc"] = str(min(device_ip_counter[d_ip], 8)) <NEW_LINE> row["idc"] = str(min(device_id_counter[d_id], 8)) <NEW_LINE> <DEDENT> x = [0] <NEW_LINE> for key in row: <NEW_LINE> <INDENT> value = row[key] <NEW_LINE> index = abs(hash(key + '_' + value)) % D <NEW_LINE> x.append(index) <NEW_LINE> <DEDENT> yield t, ID, x, y
GENERATOR: Apply hash-trick to the original csv row and for simplicity, we one-hot-encode everything INPUT: path: path to training or testing file D: the max index that we can hash to YIELDS: ID: id of the instance, mainly useless x: a list of hashed and one-hot-encoded 'indices' we only need the index since all values are either 0 or 1 y: y = 1 if we have a click, else we have y = 0
625941b3167d2b6e31218958
def get(self, id): <NEW_LINE> <INDENT> student = Student.query.filter(Student.id == id).one() <NEW_LINE> data = StudentSchema().dump(student) <NEW_LINE> return data
Returns a student.
625941b3596a897236089885
def _calc_final_dist(self, vocab_dists, attn_dists, p_gens=None, use_reinforce=False): <NEW_LINE> <INDENT> with tf.variable_scope('final_distribution'): <NEW_LINE> <INDENT> if(use_reinforce): <NEW_LINE> <INDENT> tf.get_variable_scope().reuse_variables() <NEW_LINE> vocab_dists = [p_gen * dist for (p_gen,dist) in zip(p_gens, vocab_dists)] <NEW_LINE> attn_dists = [(1-p_gen) * dist for (p_gen,dist) in zip(p_gens, attn_dists)] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> vocab_dists = [p_gen * dist for (p_gen,dist) in zip(self.p_gens, vocab_dists)] <NEW_LINE> attn_dists = [(1-p_gen) * dist for (p_gen,dist) in zip(self.p_gens, attn_dists)] <NEW_LINE> <DEDENT> extended_vsize = self._vocab.size() + self._max_art_oovs <NEW_LINE> extra_zeros = tf.zeros((self._hps.batch_size, self._max_art_oovs)) <NEW_LINE> vocab_dists_extended = [tf.concat(axis=1, values=[dist, extra_zeros]) for dist in vocab_dists] <NEW_LINE> batch_nums = tf.range(0, limit=self._hps.batch_size) <NEW_LINE> batch_nums = tf.expand_dims(batch_nums, 1) <NEW_LINE> attn_len = tf.shape(self._enc_batch_extend_vocab)[1] <NEW_LINE> batch_nums = tf.tile(batch_nums, [1, attn_len]) <NEW_LINE> indices = tf.stack( (batch_nums, self._enc_batch_extend_vocab), axis=2) <NEW_LINE> shape = [self._hps.batch_size, extended_vsize] <NEW_LINE> attn_dists_projected = [tf.scatter_nd(indices, copy_dist, shape) for copy_dist in attn_dists] <NEW_LINE> final_dists = [vocab_dist + copy_dist for (vocab_dist,copy_dist) in zip(vocab_dists_extended, attn_dists_projected)] <NEW_LINE> return final_dists
Calculate the final distribution, for the pointer-generator model Args: vocab_dists: The vocabulary distributions. List length max_dec_steps of (batch_size, vsize) arrays. The words are in the order they appear in the vocabulary file. attn_dists: The attention distributions. List length max_dec_steps of (batch_size, attn_len) arrays Returns: final_dists: The final distributions. List length max_dec_steps of (batch_size, extended_vsize) arrays.
625941b394891a1f4081b862
def exit_handler (event): <NEW_LINE> <INDENT> code = 1 <NEW_LINE> if hasattr(event, "exit_code"): <NEW_LINE> <INDENT> code = event.exit_code <NEW_LINE> <DEDENT> with open("exit.out.txt", 'w') as f: <NEW_LINE> <INDENT> f.write("{}".format(code))
write exit code of the program running in gdb to a file called exit.out.txt
625941b385dfad0860c3ac12
def forward( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, features_only: bool = False, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, src_lengths: Optional[Any] = None, return_all_hiddens: bool = False, ): <NEW_LINE> <INDENT> x, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, full_context_alignment=full_context_alignment, alignment_layer=alignment_layer, alignment_heads=alignment_heads, ) <NEW_LINE> if not features_only: <NEW_LINE> <INDENT> x = self.output_layer(x) <NEW_LINE> <DEDENT> return x, extra
Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (optional): output from the encoder, used for encoder-side attention, should be of size T x B x C incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` features_only (bool, optional): only return features without applying output layer (default: False). full_context_alignment (bool, optional): don't apply auto-regressive mask to self-attention (default: False). Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs
625941b3baa26c4b54cb0ede
def _add_import(self): <NEW_LINE> <INDENT> fromItem = self._lineFrom.text() <NEW_LINE> importItem = self._lineImport.text() <NEW_LINE> if fromItem in self._froms: <NEW_LINE> <INDENT> lineno = 0 <NEW_LINE> for imp in self._imports['fromImports']: <NEW_LINE> <INDENT> lineno = self._imports['fromImports'][imp]['lineno'] - 1 <NEW_LINE> if self._imports['fromImports'][imp]['module'] == fromItem: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> block = self._editorWidget.document().findBlockByLineNumber(lineno) <NEW_LINE> cursor = self._editorWidget.textCursor() <NEW_LINE> cursor.setPosition(block.position()) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> cursor = self._editorWidget.textCursor() <NEW_LINE> cursor.movePosition(QTextCursor.Start) <NEW_LINE> <DEDENT> cursor.movePosition(QTextCursor.EndOfLine) <NEW_LINE> if fromItem: <NEW_LINE> <INDENT> importLine = '\nfrom {0} import {1}'.format(fromItem, importItem) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> importLine = '\nimport {0}'.format(importItem) <NEW_LINE> <DEDENT> if self._editorWidget.document().find(importLine[1:]).position() == -1: <NEW_LINE> <INDENT> cursor.insertText(importLine) <NEW_LINE> <DEDENT> self.close()
Get From item and Import item and add the import on the code
625941b30fa83653e4656d7f
def __init__(self): <NEW_LINE> <INDENT> self.__mssg = []
@brief Constructor.
625941b3e64d504609d745fa
def kinship(K, nclusters=1, img_kws=None, ax=None): <NEW_LINE> <INDENT> from numpy import asarray, clip, percentile <NEW_LINE> plt = get_pyplot() <NEW_LINE> ax = plt.gca() if ax is None else ax <NEW_LINE> if img_kws is None: <NEW_LINE> <INDENT> img_kws = dict() <NEW_LINE> <DEDENT> if "cmap" not in img_kws: <NEW_LINE> <INDENT> img_kws["cmap"] = "RdBu_r" <NEW_LINE> <DEDENT> K = asarray(K, float) <NEW_LINE> if nclusters == "auto": <NEW_LINE> <INDENT> K = _infer_clustering(K) <NEW_LINE> <DEDENT> elif nclusters > 1: <NEW_LINE> <INDENT> K = _clustering(K, nclusters) <NEW_LINE> <DEDENT> cmin = percentile(K, 2) <NEW_LINE> cmax = percentile(K, 98) <NEW_LINE> K = clip(K, cmin, cmax) <NEW_LINE> K = (K - K.min()) / (K.max() - K.min()) <NEW_LINE> mesh = ax.pcolormesh(K, **img_kws) <NEW_LINE> ax.set_aspect("equal") <NEW_LINE> ax.set(xlim=(0, K.shape[1]), ylim=(0, K.shape[0])) <NEW_LINE> ax.xaxis.set_ticks([]) <NEW_LINE> ax.yaxis.set_ticks([]) <NEW_LINE> ax.figure.colorbar(mesh, None, ax)
Plot heatmap of a kinship matrix. Parameters ---------- K : 2d-array Kinship matrix. nclusters : int, str, optional Number of blocks to be seen from the heatmap. It defaults to ``1``, which means that no ordering is performed. Pass ``"auto"`` to automatically determine the number of clusters. Pass an integer to select the number of clusters. img_kws : dict, optional Keyword arguments forwarded to the matplotlib pcolormesh function. ax : matplotlib Axes, optional The target handle for this figure. If ``None``, the current axes is set. Example ------- .. plot:: >>> import limix_plot as lp >>> >>> K = lp.load_dataset("kinship") >>> lp.kinship(K)
625941b37c178a314d6ef212
def __call__(self, obj, tag=None, *args, **kwargs): <NEW_LINE> <INDENT> self.save(obj, tag=tag, *args, **kwargs)
Same as :meth:`save`
625941b345492302aab5e07a
def get_object_filter(self, queryset): <NEW_LINE> <INDENT> filter_class = self.get_filter_class() <NEW_LINE> return filter_class(self.request.GET, queryset)
Return the instance of the filter class for the 'queryset'from ..mixins import FilterMixin
625941b355399d3f0558846d
def get_all(self) -> dict: <NEW_LINE> <INDENT> return self._utility()
This method returns the value of any metadata parameter query if found and raises error if not found args: path: for command_line use returns: A dictionary of all parameters in the metadata file :exception: AssertionError
625941b39b70327d1c4e0b8e
def test_get_best_n_queues_pack_complex(self): <NEW_LINE> <INDENT> simulation.set_param("queue_selection", "pack") <NEW_LINE> placement = self.front_end.get_best_n_queues(self.queues, 15) <NEW_LINE> expected_placement = [("i", 0), ("i", 1), ("i", 2), ("i", 3), ("i", 4), ("i", 5), ("c", 1), ("c", 2), ("c", 3), ("c", 4), ("c", 5), ("d", 3), ("d", 4), ("d", 5), ("h", 4)] <NEW_LINE> self.assert_lists_equal(expected_placement, placement)
Tests "pack" placement in a more complex scenario. For this test, not all queues have the same length at the end.
625941b3b7558d58953c4cd7
def rule_keys(self): <NEW_LINE> <INDENT> return self._rules.keys()
return list of keys which have associated rules
625941b330dc7b7665901725
def __eq__(self, other): <NEW_LINE> <INDENT> if not isinstance(other, Duration): <NEW_LINE> <INDENT> return NotImplemented <NEW_LINE> <DEDENT> return self._value == other._value
Returns True if other is equal to this object.
625941b35e10d32532c5ecea
def get_domain_metadata(domain, domain_type, first_seen=True, last_ckeck=True, status=True, ports=True, tags=False, tags_safe=False, languages=False, screenshot=False): <NEW_LINE> <INDENT> dict_metadata = {} <NEW_LINE> dict_metadata['id'] = domain <NEW_LINE> dict_metadata['type'] = domain_type <NEW_LINE> if first_seen: <NEW_LINE> <INDENT> res = get_domain_first_seen(domain, domain_type=domain_type) <NEW_LINE> if res is not None: <NEW_LINE> <INDENT> dict_metadata['first_seen'] = res <NEW_LINE> <DEDENT> <DEDENT> if last_ckeck: <NEW_LINE> <INDENT> res = get_domain_last_check(domain, domain_type=domain_type) <NEW_LINE> if res is not None: <NEW_LINE> <INDENT> dict_metadata['last_check'] = res <NEW_LINE> <DEDENT> <DEDENT> if status: <NEW_LINE> <INDENT> dict_metadata['status'] = is_domain_up(domain, domain_type) <NEW_LINE> <DEDENT> if ports: <NEW_LINE> <INDENT> dict_metadata['ports'] = get_domain_all_ports(domain, domain_type) <NEW_LINE> <DEDENT> if tags: <NEW_LINE> <INDENT> dict_metadata['tags'] = get_domain_tags(domain) <NEW_LINE> <DEDENT> if tags_safe: <NEW_LINE> <INDENT> if tags: <NEW_LINE> <INDENT> dict_metadata['is_tags_safe'] = Tag.is_tags_safe(dict_metadata['tags']) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dict_metadata['is_tags_safe'] = Tag.is_tags_safe(get_domain_tags(domain)) <NEW_LINE> <DEDENT> <DEDENT> if languages: <NEW_LINE> <INDENT> dict_metadata['languages'] = Language.get_languages_from_iso(get_domain_languages(domain, r_list=True), sort=True) <NEW_LINE> <DEDENT> if screenshot: <NEW_LINE> <INDENT> dict_metadata['screenshot'] = get_domain_random_screenshot(domain) <NEW_LINE> <DEDENT> return dict_metadata
Get Domain basic metadata :param first_seen: get domain first_seen :type first_seen: boolean :param last_ckeck: get domain last_check :type last_ckeck: boolean :param ports: get all domain ports :type ports: boolean :param tags: get all domain tags :type tags: boolean :return: a dict of all metadata for a given domain :rtype: dict
625941b3adb09d7d5db6c54e
@lab.route('/procedures/add', methods=['GET', 'POST']) <NEW_LINE> @login_required <NEW_LINE> @privilege_required(Privilege.MODERATE) <NEW_LINE> def add_procedure(): <NEW_LINE> <INDENT> form = ProcedureForm() <NEW_LINE> if form.validate_on_submit(): <NEW_LINE> <INDENT> procedure = LabProcedure(title=form.title.data, content=form.procedure_content.data, author=current_user._get_current_object()) <NEW_LINE> database.create(procedure) <NEW_LINE> flash('Added procedure', 'success') <NEW_LINE> return redirect(url_for('lab.all_procedures')) <NEW_LINE> <DEDENT> return render_template('lab/procedures/add_procedure.html', form=form)
Route to display form to add a Standard Operating Procedure for Lab
625941b363d6d428bbe442b1
def spawn_jobs(self, func, items, cfg): <NEW_LINE> <INDENT> njobs = self.njobs <NEW_LINE> assert njobs <= len(items) <NEW_LINE> self.input = self.Queue() <NEW_LINE> self.output = self.Queue() <NEW_LINE> self.processes = [ self.Process( target=worker, name=str(n), args=(func, self.input, self.output, cfg, n) ) for n in range(njobs) ] <NEW_LINE> for p in self.processes: <NEW_LINE> <INDENT> p.start() <NEW_LINE> <DEDENT> for item in items[:njobs]: <NEW_LINE> <INDENT> self.input.put(item) <NEW_LINE> <DEDENT> if clog().isEnabledFor(CHAT): <NEW_LINE> <INDENT> for p in self.processes: <NEW_LINE> <INDENT> clog().chat("Worker %s.", p.name) <NEW_LINE> <DEDENT> <DEDENT> return items[njobs:]
Spawn <njobs> jobs to process <items> in parallel/concurrently. The processes are started and feeded with the first <njobs> items in <items>, the rest of them need to be pushed manually calling send_next_item_from; the result of each file processed can be fetched from the <output>. Return the <rest> of the <items> not sent, and the <output> queue.
625941b31b99ca400220a86a
@slim.add_arg_scope <NEW_LINE> def split_separable_conv2d(input_tensor, num_outputs, scope=None, normalizer_fn=None, stride=1, rate=1, endpoints=None, use_explicit_padding=False): <NEW_LINE> <INDENT> with contextlib2.ExitStack() as stack: <NEW_LINE> <INDENT> if scope is None: <NEW_LINE> <INDENT> s = stack.enter_context(tf.variable_scope(None, default_name='separable')) <NEW_LINE> stack.enter_context(tf.name_scope(s.original_name_scope)) <NEW_LINE> scope = '' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> scope += '_' <NEW_LINE> <DEDENT> dw_scope = scope + 'depthwise' <NEW_LINE> endpoints = endpoints if endpoints is not None else {} <NEW_LINE> kernel_size = [3, 3] <NEW_LINE> padding = 'SAME' <NEW_LINE> if use_explicit_padding: <NEW_LINE> <INDENT> padding = 'VALID' <NEW_LINE> input_tensor = _fixed_padding(input_tensor, kernel_size, rate) <NEW_LINE> <DEDENT> net = slim.separable_conv2d( input_tensor, None, kernel_size, depth_multiplier=1, stride=stride, rate=rate, normalizer_fn=normalizer_fn, padding=padding, scope=dw_scope) <NEW_LINE> endpoints[dw_scope] = net <NEW_LINE> pw_scope = scope + 'pointwise' <NEW_LINE> net = slim.conv2d( net, num_outputs, [1, 1], stride=1, normalizer_fn=normalizer_fn, scope=pw_scope) <NEW_LINE> endpoints[pw_scope] = net <NEW_LINE> <DEDENT> return net
Separable mobilenet V1 style convolution. Depthwise convolution, with default non-linearity, followed by 1x1 depthwise convolution. This is similar to slim.separable_conv2d, but differs in tha it applies batch normalization and non-linearity to depthwise. This matches the basic building of Mobilenet Paper (https://arxiv.org/abs/1704.04861) Args: input_tensor: input num_outputs: number of outputs scope: optional name of the scope. Note if provided it will use scope_depthwise for deptwhise, and scope_pointwise for pointwise. normalizer_fn: which normalizer function to use for depthwise/pointwise stride: stride rate: output rate (also known as dilation rate) endpoints: optional, if provided, will export additional tensors to it. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. Returns: output tesnor
625941b36e29344779a623d1
def __getitem__(self, idx): <NEW_LINE> <INDENT> pass
To be overwritten.
625941b3ec188e330fd5a563
def sample_with_small_evaluation(variable, degree, max_abs_input, entropy): <NEW_LINE> <INDENT> assert max_abs_input >= 1 <NEW_LINE> entropies = entropy * np.random.dirichlet(np.ones(degree + 1)) <NEW_LINE> coeffs = [] <NEW_LINE> for power in range(degree + 1): <NEW_LINE> <INDENT> delta = 0.5 * (degree - 2 * power) * math.log10(max_abs_input) <NEW_LINE> power_entropy = entropies[power] + delta <NEW_LINE> min_abs = 1 if power == degree else 0 <NEW_LINE> coeff = number.integer(power_entropy, signed=True, min_abs=min_abs) <NEW_LINE> coeffs.append(coeff) <NEW_LINE> <DEDENT> terms = [monomial(coeff, variable, power) for power, coeff in enumerate(coeffs)] <NEW_LINE> return ops.Add(*terms)
Generates a (canonically ordered) polynomial, with bounded evaluation. The coefficients are chosen to make use of the entropy, with the scaling adjusted so that all give roughly the same contribution to the output of the polynomial when the input is bounded in magnitude by `max_abs_input`. Args: variable: Variable to use in polynomial. degree: Degree of polynomial. max_abs_input: Number >= 1; max absolute value of input. entropy: Float; randomness for generating polynomial. Returns: Instance of `ops.Add`.
625941b321bff66bcd684710
def update(self, entity, params): <NEW_LINE> <INDENT> pk_name = self.get_primary_field(entity) <NEW_LINE> obj = entity.get_from_id(params[pk_name]) <NEW_LINE> params.pop('_id') <NEW_LINE> params.pop('sprox_id') <NEW_LINE> params.pop('_method') <NEW_LINE> for key, value in params.items(): <NEW_LINE> <INDENT> if key not in entity.structure: <NEW_LINE> <INDENT> continue; <NEW_LINE> <DEDENT> value = self._cast_value(entity, key, value) <NEW_LINE> if value is not None: <NEW_LINE> <INDENT> setattr(obj,key,value) <NEW_LINE> <DEDENT> <DEDENT> obj.save() <NEW_LINE> return obj
Update an entry of type entity which matches the params.
625941b373bcbd0ca4b2be37
def revert_all(self): <NEW_LINE> <INDENT> write_registers = self.get_registers_by_type("byte", False) <NEW_LINE> for register in write_registers: <NEW_LINE> <INDENT> self.revert_point(register.point_name)
Sets all points on the device to their default values
625941b3ff9c53063f47bfb9
def _get_license(self, picking): <NEW_LINE> <INDENT> license = picking.carrier_id.postlogistics_license_id <NEW_LINE> if not license: <NEW_LINE> <INDENT> company_licenses = picking.company_id.postlogistics_license_ids <NEW_LINE> group = picking.carrier_id.postlogistics_service_group_id <NEW_LINE> if not company_licenses or not group: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> group_license_ids = [l.id for l in group.postlogistics_license_ids] <NEW_LINE> if not group_license_ids: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> license = [l for l in company_licenses if l.id in group_license_ids][0] <NEW_LINE> <DEDENT> return license.number
Get the license Take it from carrier and if not defined get the first license depending on service group. This needs to have associated licenses to groups. :return: license number
625941b332920d7e50b27f8b
def group_name(self, predmetnik): <NEW_LINE> <INDENT> assert len(predmetnik) == 5, "Predmetnik not complete " + str(predmetnik) <NEW_LINE> short_name = "{0}_{1}-{2}".format( predmetnik[5]['short_title'], predmetnik[1]['short_title'], predmetnik[2]['short_title'], ) <NEW_LINE> name = "{0}, {1}, {2}".format( predmetnik[5]['title']['sl'], predmetnik[2]['title']['sl'], predmetnik[1]['title']['sl'], ) <NEW_LINE> return short_name, name
Generate tuple (name, short_name) from Studis predmetnik entry.
625941b34f6381625f114801
@contract(H="array[3x3],skew_symmetric", returns="array[3]") <NEW_LINE> def map_hat(H: se2value): <NEW_LINE> <INDENT> v = np.zeros(3) <NEW_LINE> v[2] = -H[0, 1] <NEW_LINE> v[1] = H[0, 2] <NEW_LINE> v[0] = -H[1, 2] <NEW_LINE> return v
The inverse of :py:func:`hat_map`.
625941b3462c4b4f79d1d48a
def lastIndexOf(self, QXmlStreamAttribute, int_from=-1): <NEW_LINE> <INDENT> return 0
QXmlStreamAttributes.lastIndexOf(QXmlStreamAttribute, int from=-1) -> int
625941b3099cdd3c635f0a17
def save_mfcc(dataset_path, json_path, num_mfcc=15, n_fft=4096, hop_length=2048, num_segments=1): <NEW_LINE> <INDENT> data = { "mapping": [], "labels": [], "mfcc": [] } <NEW_LINE> samples_per_segment = int(SAMPLES_PER_TRACK / num_segments) <NEW_LINE> num_mfcc_vectors_per_segment = math.ceil(samples_per_segment / hop_length) <NEW_LINE> for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dataset_path)): <NEW_LINE> <INDENT> if dirpath is not dataset_path: <NEW_LINE> <INDENT> semantic_label = dirpath.split("/")[-1] <NEW_LINE> data["mapping"].append(semantic_label) <NEW_LINE> print("\nProcessing: {}".format(semantic_label)) <NEW_LINE> for f in filenames: <NEW_LINE> <INDENT> file_path = os.path.join(dirpath, f) <NEW_LINE> signal, sample_rate = librosa.load(file_path, sr=SAMPLE_RATE) <NEW_LINE> for d in range(num_segments): <NEW_LINE> <INDENT> start = samples_per_segment * d <NEW_LINE> finish = start + samples_per_segment <NEW_LINE> mfcc = librosa.feature.mfcc(signal[start:finish], sample_rate, n_mfcc=num_mfcc, n_fft=n_fft, hop_length=hop_length) <NEW_LINE> mfcc = mfcc.T <NEW_LINE> if len(mfcc) == num_mfcc_vectors_per_segment: <NEW_LINE> <INDENT> data["mfcc"].append(mfcc.tolist()) <NEW_LINE> data["labels"].append(i - 1) <NEW_LINE> print("{}, segment:{}".format(file_path, d + 1)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> with open(json_path, "w") as fp: <NEW_LINE> <INDENT> json.dump(data, fp, indent=4)
Extracts MFCCs from music dataset and saves them into a json file along witgh genre labels. :param dataset_path (str): Path to dataset :param json_path (str): Path to json file used to save MFCCs :param num_mfcc (int): Number of coefficients to extract :param n_fft (int): Interval we consider to apply FFT. Measured in # of samples :param hop_length (int): Sliding window for FFT. Measured in # of samples :param: num_segments (int): Number of segments we want to divide sample tracks into :return:
625941b331939e2706e4cc2c
def getFrameRate(self): <NEW_LINE> <INDENT> if not self.proxy: <NEW_LINE> <INDENT> self.proxy = self.session.service("ALVisionRecognition") <NEW_LINE> <DEDENT> return self.proxy.getFrameRate()
Gets extractor framerate :returns int: Current value of the framerate of the extractor
625941b3de87d2750b85fb48
def cleanTest(vect, test_docs, output_file): <NEW_LINE> <INDENT> dtm = vect.transform(test_docs) <NEW_LINE> logger.info("\t\tTest DTM shape: %s\n" % (dtm.shape,)) <NEW_LINE> sp.save_npz(output_file, dtm)
Transform test texts into DTM with the training CountVectorizer. Save the test DTM. Inputs: - vect (sklearn.feature_extraction.text.CountVectorizer): CountVectorizer fitted on the training set - test_docs ([str]): test documents - output_file (str): path to save the test DTM as a .npz
625941b392d797404e303f4c
def start(self, ip, port): <NEW_LINE> <INDENT> self.read_servers() <NEW_LINE> self.create_master_node() <NEW_LINE> print("zk_minoter 注册中心启动成功")
开始服务调用接口
625941b3287bf620b61d382c
def getElementsList(self, tag, filter=None, elementList=None): <NEW_LINE> <INDENT> self._wait() <NEW_LINE> if elementList: <NEW_LINE> <INDENT> allElements = elementList <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if self.frameName: <NEW_LINE> <INDENT> myFrame = self.getFrame(self.frameName) <NEW_LINE> if self.formName: <NEW_LINE> <INDENT> elements = myFrame.Document.Body.orms[self.formName].getElementsByTagName(tag) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> elements = myFrame.Document.Body.getElementsByTagName(tag) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if self.formName: <NEW_LINE> <INDENT> elements = self._ie.Document.Body.forms[self.formName].getElementsByTagName(tag) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> elements = self._ie.Document.Body.getElementsByTagName(tag) <NEW_LINE> <DEDENT> <DEDENT> count = 0 <NEW_LINE> allElements = [] <NEW_LINE> while count < elements.length: <NEW_LINE> <INDENT> allElements.append(elements[count]) <NEW_LINE> count +=1 <NEW_LINE> <DEDENT> <DEDENT> try: <NEW_LINE> <INDENT> if filter: <NEW_LINE> <INDENT> myElements = [] <NEW_LINE> filters = filter.split(";") <NEW_LINE> for el in allElements: <NEW_LINE> <INDENT> match = False <NEW_LINE> for f in filters[:]: <NEW_LINE> <INDENT> atts = f.split("=") <NEW_LINE> valText = el.getAttribute(atts[0]) <NEW_LINE> if valText != None: <NEW_LINE> <INDENT> valText = str(valText) <NEW_LINE> valText = valText.strip() <NEW_LINE> valText = valText.lower() <NEW_LINE> wantText = atts[1].lower() <NEW_LINE> if valText == wantText: <NEW_LINE> <INDENT> match = True <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if match: <NEW_LINE> <INDENT> myElements.append(el) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> myElements = allElements <NEW_LINE> <DEDENT> return myElements <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> (ErrorType,ErrorValue,ErrorTB)=sys.exc_info() <NEW_LINE> print (sys.exc_info()) <NEW_LINE> traceback.print_exc(ErrorTB) <NEW_LINE> return None <NEW_LINE> 
<DEDENT> else: <NEW_LINE> <INDENT> return None
Sets the specified attribute of any element parameters: tag - The HTML tag name [filter] - Only return elements that match this filter in format (att1=val1;att2=val2), ie. "type=checkbox;checked=True" returns: A filtered list of the found elements
625941b3627d3e7fe0d68c09
def command(self, command): <NEW_LINE> <INDENT> _command = '{0}\r\n'.format(command) <NEW_LINE> while len(_command) > 0: <NEW_LINE> <INDENT> character_to_send = _command[0] <NEW_LINE> read_back_character = '' <NEW_LINE> _command = _command[1:] <NEW_LINE> while character_to_send != read_back_character: <NEW_LINE> <INDENT> self._write(character_to_send) <NEW_LINE> time.sleep(self._read_delay) <NEW_LINE> read_back_character = self._read() <NEW_LINE> <DEDENT> <DEDENT> answer = '' <NEW_LINE> while len(answer) < 2 or answer[-2:] != '\r\n': <NEW_LINE> <INDENT> time.sleep(self._read_delay) <NEW_LINE> answer = ''.join((answer, self._read())) <NEW_LINE> <DEDENT> return answer[:-2]
Send a command word character by character to the device. Check reply to be in sync and process response. Keyword arguments: command -- the command word to send to the device
625941b35fc7496912cc3740
def close_logger(self): <NEW_LINE> <INDENT> handlers = self.log.handlers[:] <NEW_LINE> for handler in handlers: <NEW_LINE> <INDENT> handler.close() <NEW_LINE> self.log.removeHandler(handler)
Correctly closes the FileHandler objects on the logger so that there are no issues with file manipulation. :return: None.
625941b33317a56b86939a26
def test_search(): <NEW_LINE> <INDENT> criteria = collections.defaultdict(int) <NEW_LINE> for _ in range(2): <NEW_LINE> <INDENT> issue = factory.create_issue_dict() <NEW_LINE> r = requests.post( _URL_CREATE, data=json.dumps(issue), headers={'Content-Type': 'application/json'}, auth=tu.get_credentials() ) <NEW_LINE> assert r.status_code == 200 <NEW_LINE> criteria[(issue['project'], issue['severity'], issue['status'])] += 1 <NEW_LINE> <DEDENT> return <NEW_LINE> for project, severity, status in criteria: <NEW_LINE> <INDENT> params = { 'criteria': ','.join([ 'project:{}'.format(project), 'severity:{}'.format(severity), 'status:{}'.format(status) ]) } <NEW_LINE> r = requests.get(_URL_SEARCH, params=params) <NEW_LINE> data = tu.assert_ws_response(_URL_SEARCH, r) <NEW_LINE> assert data['total'] >= criteria[project, severity, status]
ERRATA :: WS :: SEARCH :: execution.
625941b356b00c62f0f14417
def test_balance_1_node(): <NEW_LINE> <INDENT> from bst import BST <NEW_LINE> new_bst = BST() <NEW_LINE> new_bst.insert(10) <NEW_LINE> assert new_bst.balance() == 0
Test balance of tree of only head.
625941b3462c4b4f79d1d48b
def satisfy_custom_matcher(self, args, kwargs): <NEW_LINE> <INDENT> is_match = super(Expectation, self).satisfy_custom_matcher(args, kwargs) <NEW_LINE> if is_match: <NEW_LINE> <INDENT> self._satisfy() <NEW_LINE> <DEDENT> return is_match
Returns a boolean indicating whether or not the mock will accept the provided arguments. :param tuple args: A tuple of possition args :param dict kwargs: A dictionary of keyword args :return: Whether or not the mock accepts the provided arguments. :rtype: bool
625941b35166f23b2e1a4f13
def test_summary(self): <NEW_LINE> <INDENT> cache = DummyCache() <NEW_LINE> cache.upload("pkg1-0.3.tar.gz", None) <NEW_LINE> cache.upload("pkg1-1.1.tar.gz", None) <NEW_LINE> p1 = cache.upload("pkg1a2.tar.gz", None, "pkg1", "1.1.1a2", "summary") <NEW_LINE> p2 = cache.upload("pkg2.tar.gz", None, "pkg2", "0.1dev2", "summary") <NEW_LINE> summaries = cache.summary() <NEW_LINE> self.assertItemsEqual( summaries, [ { "name": "pkg1", "summary": "summary", "last_modified": p1.last_modified, }, { "name": "pkg2", "summary": "summary", "last_modified": p2.last_modified, }, ], )
summary constructs per-package metadata summary
625941b39f2886367277a655
def get_user_by_address(self, address: str, user_name: str = None) -> Optional[User]: <NEW_LINE> <INDENT> for user in self.user_list: <NEW_LINE> <INDENT> if user.address == address: <NEW_LINE> <INDENT> return user <NEW_LINE> <DEDENT> <DEDENT> if user_name is None: <NEW_LINE> <INDENT> user_name = self.get_name_by_address(address) <NEW_LINE> <DEDENT> new_user = User(self, address, user_name) <NEW_LINE> self.add_user(new_user) <NEW_LINE> return new_user
Returns a User object with the specified user name. :param address: address of the user which is being searched for or added :param user_name: Name of user which is being searched for
625941b3b57a9660fec3363a
def GetReference(self,coordinates): <NEW_LINE> <INDENT> pass
GetReference(self: RepeatingReferenceSource,coordinates: RepeaterCoordinates) -> Reference Returns an individual repeating reference given by coordinates in the array,or ll if there is no reference at the coordinates (for example if there is a hole in a divided surface.) coordinates: The coordinates in the array of repeating references. Returns: The repeating reference.
625941b3adb09d7d5db6c54f