code: string, length 4 to 4.48k
docstring: string, length 1 to 6.45k
_id: string, length 24
def get_object(self, queryset=None): <NEW_LINE> <INDENT> if "username" in self.kwargs: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return Profile.objects.get(username=self.kwargs["username"]) <NEW_LINE> <DEDENT> except Profile.DoesNotExist: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if hasattr(self.request, "user"): <NEW_LINE> <INDENT> return self.request.user <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pass
This view can be called in two ways: `profile/` or `profile/{username}`
625941b51f037a2d8b945ff3
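For the get_object() entry above, a minimal sketch of the two URL patterns its docstring describes, assuming a hypothetical ProfileView class that defines this method (the module path and view name are illustrative, not from the source):

from django.urls import path
from .views import ProfileView  # assumed: the view class defining get_object()

urlpatterns = [
    # No username: get_object() falls back to request.user.
    path("profile/", ProfileView.as_view(), name="own-profile"),
    # With a username: get_object() looks up the Profile by that username.
    path("profile/<str:username>/", ProfileView.as_view(), name="profile"),
]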
def root_style(font=132): <NEW_LINE> <INDENT> hipStyle = ROOT.TStyle("clearRetro", "HIP plots style for publications") <NEW_LINE> hipStyle.SetFrameBorderMode(0) <NEW_LINE> hipStyle.SetCanvasBorderMode(0) <NEW_LINE> hipStyle.SetPadBorderMode(0) <NEW_LINE> hipStyle.SetPadBorderSize(0) <NEW_LINE> hipStyle.SetPadColor(0) <NEW_LINE> hipStyle.SetCanvasColor(0) <NEW_LINE> hipStyle.SetTitleColor(0) <NEW_LINE> hipStyle.SetStatColor(0) <NEW_LINE> hipStyle.SetHistLineWidth(2) <NEW_LINE> hipStyle.SetLineWidth(2) <NEW_LINE> hipStyle.SetOptTitle(0) <NEW_LINE> hipStyle.SetOptStat(0) <NEW_LINE> hipStyle.SetOptFit(0) <NEW_LINE> hipStyle.SetLineStyleString(2, "[12 12]") <NEW_LINE> hipStyle.SetTextFont(font) <NEW_LINE> hipStyle.SetTextSize(0.24) <NEW_LINE> hipStyle.SetLabelFont(font, "x") <NEW_LINE> hipStyle.SetLabelFont(font, "y") <NEW_LINE> hipStyle.SetLabelFont(font, "z") <NEW_LINE> hipStyle.SetTitleFont(font, "x") <NEW_LINE> hipStyle.SetTitleFont(font, "y") <NEW_LINE> hipStyle.SetTitleFont(font, "z") <NEW_LINE> hipStyle.SetLegendFont(font) <NEW_LINE> hipStyle.SetLabelSize(0.04, "x") <NEW_LINE> hipStyle.SetTitleSize(0.05, "x") <NEW_LINE> hipStyle.SetTitleColor(1, "x") <NEW_LINE> hipStyle.SetLabelSize(0.04, "y") <NEW_LINE> hipStyle.SetTitleSize(0.05, "y") <NEW_LINE> hipStyle.SetTitleColor(1, "y") <NEW_LINE> hipStyle.SetLabelSize(0.04, "z") <NEW_LINE> hipStyle.SetTitleSize(0.05, "z") <NEW_LINE> hipStyle.SetTitleColor(1, "z") <NEW_LINE> hipStyle.SetTitleOffset(0.8, "x") <NEW_LINE> hipStyle.SetTitleOffset(0.8, "y") <NEW_LINE> hipStyle.SetTitleOffset(0.8, "z") <NEW_LINE> hipStyle.SetLegendBorderSize(1) <NEW_LINE> hipStyle.SetMarkerStyle(2) <NEW_LINE> NRGBs = 5 <NEW_LINE> NCont = 255 <NEW_LINE> stops = array.array('d', [0.00, 0.34, 0.61, 0.84, 1.00]) <NEW_LINE> red = array.array('d', [0.00, 0.00, 0.87, 1.00, 0.51]) <NEW_LINE> green = array.array('d', [0.00, 0.81, 1.00, 0.20, 0.00]) <NEW_LINE> blue = array.array('d', [0.51, 1.00, 0.12, 0.00, 0.00]) <NEW_LINE> ROOT.TColor.CreateGradientColorTable(NRGBs, stops, red, green, blue, NCont) <NEW_LINE> hipStyle.SetNumberContours(NCont) <NEW_LINE> ROOT.gROOT.SetStyle("clearRetro")
Sets the style for ROOT plots. The SNO+ standard style is adapted from a .C macro sent to the collaboration. Args: font (int): Integer denoting the font style for plots. Default is 132. See https://root.cern.ch/root/html/TAttText.html for details.
625941b5ff9c53063f47bff3
def get_position(self): <NEW_LINE> <INDENT> img = self.wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'geetest_canvas_img'))) <NEW_LINE> time.sleep(2) <NEW_LINE> location = img.location <NEW_LINE> size = img.size <NEW_LINE> top, bottom, left, right = location['y'], location['y'] + size['height'], location['x'], location['x'] + size[ 'width'] <NEW_LINE> return top, bottom, left, right
Get the position of the captcha :return: tuple with the captcha position (top, bottom, left, right)
625941b5a8370b7717052696
def get_varience(self, sample=True) -> float: <NEW_LINE> <INDENT> fi_sum = ( (sum(self.simple_frequency()) - 1) if sample else sum(self.simple_frequency()) ) <NEW_LINE> v2_sum = sum(self.deviation_v2()) <NEW_LINE> variance = round((v2_sum / fi_sum), self.decimal_places) <NEW_LINE> self._table["Variância"] = [variance] <NEW_LINE> return variance
Method for computing the variance. :param sample: default True to compute the sample variance :return: the rounded variance
625941b5377c676e91271f9f
def func(self,func_name): <NEW_LINE> <INDENT> return self._eval(su2func,func_name)
Evaluates SU2 Design Functions by Name
625941b5236d856c2ad445d4
def test_two_excluded_named_namespaces(self): <NEW_LINE> <INDENT> gf = pagegenerators.GeneratorFactory(site=self.get_site()) <NEW_LINE> gf.handle_arg('-ns:not:Talk,File') <NEW_LINE> ns = set(range(16)) <NEW_LINE> ns.remove(1) <NEW_LINE> ns.remove(6) <NEW_LINE> self.assertTrue(ns.issubset(gf.namespaces))
Test two excluded named namespaces.
625941b58a43f66fc4b53e5f
def get_plugin_version(hpi_filename): <NEW_LINE> <INDENT> with zipfile.ZipFile(hpi_filename) as z: <NEW_LINE> <INDENT> return LooseVersion(re_plugin_version.sub(r"\1", z.read('META-INF/MANIFEST.MF')))
Extract the plugin version from the MANIFEST.MF :param hpi_filename: string hpi file name :return: distutils.version.LooseVersion
625941b5099cdd3c635f0a51
def _copytree(self, src, dst, symlinks=False, ignore=None): <NEW_LINE> <INDENT> if not os.path.exists(dst): <NEW_LINE> <INDENT> os.makedirs(dst) <NEW_LINE> <DEDENT> names = os.listdir(src) <NEW_LINE> if ignore is not None: <NEW_LINE> <INDENT> ignored_names = ignore(src, names) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> ignored_names = set() <NEW_LINE> <DEDENT> errors = [] <NEW_LINE> for name in names: <NEW_LINE> <INDENT> if name in ignored_names: <NEW_LINE> <INDENT> self._logger.info("Ignored: " + str(name)) <NEW_LINE> continue <NEW_LINE> <DEDENT> srcname = os.path.join(src, name) <NEW_LINE> dstname = os.path.join(dst, name) <NEW_LINE> try: <NEW_LINE> <INDENT> if symlinks and os.path.islink(srcname): <NEW_LINE> <INDENT> linkto = os.readlink(srcname) <NEW_LINE> os.symlink(linkto, dstname) <NEW_LINE> <DEDENT> elif os.path.isdir(srcname): <NEW_LINE> <INDENT> self._copytree(srcname, dstname, symlinks, ignore) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> shutil.copy2(srcname, dstname) <NEW_LINE> <DEDENT> <DEDENT> except IOError as why: <NEW_LINE> <INDENT> errors.append((srcname, dstname, str(why))) <NEW_LINE> <DEDENT> except Error as err: <NEW_LINE> <INDENT> errors.extend(err.args[0]) <NEW_LINE> <DEDENT> <DEDENT> try: <NEW_LINE> <INDENT> shutil.copystat(src, dst) <NEW_LINE> <DEDENT> except OSError as why: <NEW_LINE> <INDENT> if WindowsError is not None and isinstance(why, WindowsError): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> errors.extend((src, dst, str(why))) <NEW_LINE> <DEDENT> <DEDENT> if errors: <NEW_LINE> <INDENT> raise Error(errors)
Variant of shutil.copytree that copies into an existing folder (tweaked to skip folder creation)
625941b57b180e01f3dc45fc
def p_item(self,p): <NEW_LINE> <INDENT> mif=MIFObject() <NEW_LINE> if p[1]=='PPOINT': <NEW_LINE> <INDENT> mif.type='POINT' <NEW_LINE> mif.points=tuple(self.numbers) <NEW_LINE> <DEDENT> elif p[1]=='PPLINE': <NEW_LINE> <INDENT> mif.type='PLINE' <NEW_LINE> mif.pline=int(self.pline) <NEW_LINE> mif.pen=tuple(self.pen) <NEW_LINE> mif.smooth=self.smooth <NEW_LINE> mif.points=tuple(self.numbers) <NEW_LINE> <DEDENT> elif p[1]=='PLINE': <NEW_LINE> <INDENT> mif.type='LINE' <NEW_LINE> mif.pline=int(self.pline) <NEW_LINE> mif.pen=tuple(self.pen) <NEW_LINE> mif.points=tuple(self.numbers) <NEW_LINE> <DEDENT> elif p[1]=='PREGION': <NEW_LINE> <INDENT> mif.type='REGION' <NEW_LINE> mif.region=int(self.region) <NEW_LINE> mif.pen=tuple(self.pen) <NEW_LINE> mif.brush=tuple(self.brush) <NEW_LINE> mif.center=tuple(self.center) <NEW_LINE> mif.points=tuple(self.numbers) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> mif.type='NONE' <NEW_LINE> <DEDENT> for col in range(len(self.mid._cols)): <NEW_LINE> <INDENT> mif.setMid(self.mid._cols[col],self.mid._data[self.lineno][col]) <NEW_LINE> <DEDENT> logging.info(mif) <NEW_LINE> self.mifobj.append(mif)
item : NONE | point | pline | line | region
625941b55510c4643540f1ee
def readFromCSVHandle(self, f): <NEW_LINE> <INDENT> csvreader = csv.reader(f, delimiter=";", quotechar='"') <NEW_LINE> reactions = set() <NEW_LINE> line_no = 0 <NEW_LINE> for line in csvreader: <NEW_LINE> <INDENT> line_no += 1 <NEW_LINE> if line[0] == "NAME": <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> self.fluxDict[line[0]] = float(line[1]) <NEW_LINE> self.boundsDict[line[0]] = line[2], line[3] <NEW_LINE> <DEDENT> except IndexError: <NEW_LINE> <INDENT> raise SyntaxError("Syntax error in line %u:\nLine must " "contain exactly four values (name, flux, " "lb, ub)." % line_no) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> raise SyntaxError("Syntax error in line %u: Invalid " "floating point value." % line_no)
Parse the file given by file handle f (must be open for reading)
625941b5ac7a0e7691ed3ecf
def drag_createitems(self): <NEW_LINE> <INDENT> if self.drag is not None and self.drag['state'] == 'inprogress': <NEW_LINE> <INDENT> if self.drag['gap'] is not None: <NEW_LINE> <INDENT> x = self.drag['gap'] * self.tabwidth <NEW_LINE> self.c.create_rectangle(x-self.gapwidth, 0, x+self.gapwidth, self.height, fill=self.holecolor, outline=self.holecolor) <NEW_LINE> <DEDENT> label = self.info[self.drag['tabid']]['tablabel'] <NEW_LINE> self.drag['textitem'] = self.c.create_text(self.drag['x'], self.drag['y'], text=label, font=self.titlefont, fill=self.textcolor, anchor='s')
Called by rebuild to create canvas items for drag in progress
625941b50c0af96317bb7fde
def load(self, addr): <NEW_LINE> <INDENT> code = CodeGen() <NEW_LINE> code.comment('load_addr_' + str(addr)) <NEW_LINE> code.widen_stack() <NEW_LINE> code.switch_lane(SP, MEM) <NEW_LINE> code.decrement_to_zero() <NEW_LINE> code.switch_lane(MEM, WLK) <NEW_LINE> code.search_zero_left() <NEW_LINE> code.switch_lane(WLK, MEM) <NEW_LINE> code.decrement_to_zero() <NEW_LINE> for _ in xrange(addr): <NEW_LINE> <INDENT> code.big_right() <NEW_LINE> <DEDENT> code.start_loop() <NEW_LINE> code.switch_lane(MEM, WLK) <NEW_LINE> code.search_zero_left() <NEW_LINE> code.switch_lane(WLK, SP) <NEW_LINE> code.search_zero_right() <NEW_LINE> code.switch_lane(SP, MEM) <NEW_LINE> code.increment() <NEW_LINE> code.switch_lane(MEM, WLK) <NEW_LINE> code.search_zero_left() <NEW_LINE> code.switch_lane(WLK, MEM) <NEW_LINE> code.increment() <NEW_LINE> for _ in xrange(addr): <NEW_LINE> <INDENT> code.big_right() <NEW_LINE> <DEDENT> code.decrement() <NEW_LINE> code.end_loop() <NEW_LINE> code.switch_lane(MEM, WLK) <NEW_LINE> code.search_zero_left() <NEW_LINE> code.switch_lane(WLK, MEM) <NEW_LINE> code.start_loop() <NEW_LINE> for _ in xrange(addr): <NEW_LINE> <INDENT> code.big_right() <NEW_LINE> <DEDENT> code.increment() <NEW_LINE> code.switch_lane(MEM, WLK) <NEW_LINE> code.search_zero_left() <NEW_LINE> code.switch_lane(WLK, MEM) <NEW_LINE> code.decrement() <NEW_LINE> code.end_loop() <NEW_LINE> code.switch_lane(MEM, SP) <NEW_LINE> code.search_zero_right() <NEW_LINE> code.newline() <NEW_LINE> return code.to_string()
Copies a value from the specified memory address and pushes it on the stack. The memory is preserved. Stack count: +1
625941b538b623060ff0abe4
def ens_update0_loc(ens, obs_ens, observations, perturbs, obs_err_cov, domains, taper): <NEW_LINE> <INDENT> def local_analysis(ii): <NEW_LINE> <INDENT> oBatch, tapering = taper(ii) <NEW_LINE> oBatch = np.arange(len(observations))[oBatch] <NEW_LINE> if len(oBatch) == 0: <NEW_LINE> <INDENT> return ens[:, ii] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> c = sqrt(tapering) <NEW_LINE> return ens_update0(ens[:, ii], obs_ens[:, oBatch]*c, observations[oBatch]*c, perturbs[:, oBatch]*c, obs_err_cov[np.ix_(oBatch, oBatch)]) <NEW_LINE> <DEDENT> <DEDENT> EE = map(local_analysis, domains) <NEW_LINE> Ea = np.empty_like(ens) <NEW_LINE> for ii, Eii in zip(domains, EE): <NEW_LINE> <INDENT> Ea[:, ii] = Eii <NEW_LINE> <DEDENT> return Ea
Perform local analysis/domain updates using `ens_update0`.
625941b56aa9bd52df036b97
@pytest.mark.parametrize("test_input, expected_result", TEST_DATA) <NEW_LINE> def test_dscp_range(test_input, expected_result): <NEW_LINE> <INDENT> assert dscp_range(test_input) == expected_result
Unit-test the dscp_range function
625941b5fbf16365ca6f5fb0
def test_3_money_loss_short_close(self): <NEW_LINE> <INDENT> self.assertAlmostEqual(sum(self.prestoList.money_loss_short_close), self.mongoList.money_loss_short_close,delta=0.1)
Returns of losing short-close orders
625941b5460517430c393f85
def readNext(self): <NEW_LINE> <INDENT> if self.stream is None: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> data = readDelim(self.stream) <NEW_LINE> if data is None: <NEW_LINE> <INDENT> self.stream.close() <NEW_LINE> self.stream = None <NEW_LINE> return None <NEW_LINE> <DEDENT> self.chrom = data[0] <NEW_LINE> self.pos = int(data[1]) <NEW_LINE> self.storeCurrent(data) <NEW_LINE> return True
Read one line from stream and store it in the `current' attribute. Also sets `chrom' and `pos' to its first and second elements.
625941b5d58c6744b4257a56
def test_direct_write(self): <NEW_LINE> <INDENT> test_file = os.path.join(self.import_dir, 'test_file') <NEW_LINE> cmd = ['dd', 'if=/dev/zero', 'of=%s' % test_file, 'bs=4k', 'count=8', 'oflag=direct'] <NEW_LINE> rtn = self.common_launch_cmd(cmd) <NEW_LINE> if rtn != 0: <NEW_LINE> <INDENT> self.fail('DD returned error')
Write a file using direct I/O (dd with oflag=direct)
625941b52c8b7c6e89b355b9
def PolyArea(points): <NEW_LINE> <INDENT> x= points[:,0] <NEW_LINE> y= points[:,1] <NEW_LINE> return(0.5*np.abs(np.dot(x, np.roll(y, 1))-np.dot(y,np.roll(x, 1))))
Area of a 2D simple polygon via the shoelace formula :param points: Nx2 array of vertex coordinates :return: the polygon area
625941b5956e5f7376d70c71
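A quick numeric check of the shoelace formula used in PolyArea above; this standalone copy is runnable as-is:

import numpy as np

def poly_area(points):
    # Shoelace formula: 0.5 * |x . roll(y) - y . roll(x)|
    x, y = points[:, 0], points[:, 1]
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

square = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
print(poly_area(square))  # 1.0, the area of the unit square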
def __init__(self, d): <NEW_LINE> <INDENT> if type(d)==int: <NEW_LINE> <INDENT> self._coords = [0] * d <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self._coords = [0] * len(d) <NEW_LINE> for i in range(len(d)): <NEW_LINE> <INDENT> self._coords[i] = d[i] <NEW_LINE> <DEDENT> <DEDENT> except (TypeError, IndexError): <NEW_LINE> <INDENT> raise TypeError("Please enter a sequence of numbers or integer.")
Create a d-dimensional vector of zeros, or copy coordinates from a given sequence.
625941b585dfad0860c3ac4e
def column_or_1d(y, warn=False): <NEW_LINE> <INDENT> shape = np.shape(y) <NEW_LINE> if len(shape) == 1: <NEW_LINE> <INDENT> return np.ravel(y) <NEW_LINE> <DEDENT> if len(shape) == 2 and shape[1] == 1: <NEW_LINE> <INDENT> if warn: <NEW_LINE> <INDENT> warnings.warn("A column-vector y was passed when a 1d array was" " expected. Please change the shape of y to " "(n_samples, ), for example using ravel().", DataConversionWarning, stacklevel=2) <NEW_LINE> <DEDENT> return np.ravel(y) <NEW_LINE> <DEDENT> raise ValueError("bad input shape {0}".format(shape))
Ravel column or 1d numpy array, else raises an error Parameters ---------- y : array-like warn : boolean, default False To control display of warnings. Returns ------- y : array
625941b5e8904600ed9f1d1f
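The three input shapes column_or_1d distinguishes, sketched with plain numpy (the warning machinery is omitted):

import numpy as np

print(np.ravel(np.array([1, 2, 3])).shape)        # (3,)  - 1-d input, returned ravelled
print(np.ravel(np.array([[1], [2], [3]])).shape)  # (3,)  - column vector, ravelled (warns if warn=True)
# A (3, 2) input would make column_or_1d raise ValueError("bad input shape (3, 2)").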
def fulltexts(self, document): <NEW_LINE> <INDENT> file_id = self._file_id(document.file_code(fullpath=True)) <NEW_LINE> if not file_id: <NEW_LINE> <INDENT> logger.error(u'Fail to parse file_id for %s_%s', document.collection_acronym, document.publisher_id) <NEW_LINE> return None <NEW_LINE> <DEDENT> if not document.journal.languages: <NEW_LINE> <INDENT> logger.info(u'Journal without publication languages defined %s', file_id[0]) <NEW_LINE> return None <NEW_LINE> <DEDENT> data = {'fulltexts.pdf': set(), 'fulltexts.html': set()} <NEW_LINE> data['fulltexts.html'].add(document.original_language()) <NEW_LINE> if document.data_model_version == 'xml': <NEW_LINE> <INDENT> for lang in document.xml_languages() or []: <NEW_LINE> <INDENT> data['fulltexts.html'].add(lang) <NEW_LINE> <DEDENT> <DEDENT> languages = document.journal.languages + document.languages() <NEW_LINE> languages.append(document.original_language()) <NEW_LINE> for language in set(languages): <NEW_LINE> <INDENT> if self.is_file_available(file_id, 'pdf', language, document.original_language()): <NEW_LINE> <INDENT> data['fulltexts.pdf'].add(language) <NEW_LINE> logger.info( u'Fulltext available in pdf %s for %s, %s, %s', language, file_id[0], file_id[1], file_id[2] ) <NEW_LINE> <DEDENT> if self.is_file_available(file_id, 'html', language, document.original_language()): <NEW_LINE> <INDENT> data['fulltexts.html'].add(language) <NEW_LINE> logger.info( u'Fulltext available in html %s for %s, %s, %s', language, file_id[0], file_id[1], file_id[2] ) <NEW_LINE> <DEDENT> <DEDENT> ldata = {} <NEW_LINE> if len(data['fulltexts.pdf']) > 0: <NEW_LINE> <INDENT> for lang in data['fulltexts.pdf']: <NEW_LINE> <INDENT> if lang != document.original_language(): <NEW_LINE> <INDENT> fname = '_'.join([ lang, self._file_name(document.file_code(fullpath=True))] ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> fname = self._file_name(document.file_code(fullpath=True)) <NEW_LINE> <DEDENT> ldata['fulltexts.pdf.%s' % lang] = 'http://%s' % '/'.join([ document.scielo_domain, 'pdf', file_id[0], file_id[1], '%s.pdf' % fname ] ) <NEW_LINE> <DEDENT> <DEDENT> if len(data['fulltexts.html']) > 0: <NEW_LINE> <INDENT> for lang in data['fulltexts.html']: <NEW_LINE> <INDENT> ldata['fulltexts.html.%s' % lang] = 'http://%s/scielo.php?script=sci_arttext&pid=%s&tlng=%s' % ( document.scielo_domain, document.publisher_id, lang ) <NEW_LINE> <DEDENT> <DEDENT> return ldata
This method retrieves a dictionary of the available fulltexts in each document type ['pdf', 'html', 'xml'] input: xylose.scielo_document.Article() ex: output: { 'pdf': { 'pt': 'url', 'es': 'url', 'en': 'url' }, 'html': { 'pt': 'url', 'es': 'url', 'en': 'url' } }
625941b550485f2cf553cb8e
def read_environment(config, namespace=ENVSPACE, permissive=True): <NEW_LINE> <INDENT> this_config = config.copy() <NEW_LINE> for level1 in config.keys(): <NEW_LINE> <INDENT> if (config.get(level1) is None) or (type(config.get(level1)) is str): <NEW_LINE> <INDENT> env_var = "_".join([namespace, level1]).upper() <NEW_LINE> if os.environ.get(env_var, None) is not None: <NEW_LINE> <INDENT> this_config[level1] = os.environ.get(env_var) <NEW_LINE> <DEDENT> <DEDENT> elif type(config[level1]) is dict: <NEW_LINE> <INDENT> for level2 in config[level1].keys(): <NEW_LINE> <INDENT> if (config[level1][level2] is None) or (type(config[level1][level2])) is str: <NEW_LINE> <INDENT> env_var = '_'.join([namespace, level1, level2]).upper() <NEW_LINE> if os.environ.get(env_var, None) is not None: <NEW_LINE> <INDENT> this_config[level1][level2] = os.environ.get(env_var) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return this_config
Read in environment variable overrides Positional parameters: config - dict - a shallow copy of the main config object Keyword parameters: namespace - str - environment variable namespace. default 'TACC_' permissive - boolean - ignore loading or parsing errors Returns: An AttrDict configuration object
625941b50c0af96317bb7fdf
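How the naming scheme in read_environment maps config keys to environment variables; a small runnable sketch (the 'TACC' namespace is taken from the docstring):

import os

# config['logging']['level'] is overridden by the variable built as
# '_'.join([namespace, level1, level2]).upper():
print('_'.join(['TACC', 'logging', 'level']).upper())  # TACC_LOGGING_LEVEL
os.environ['TACC_LOGGING_LEVEL'] = 'DEBUG'
# read_environment({'logging': {'level': 'INFO'}}, namespace='TACC')
# would now return {'logging': {'level': 'DEBUG'}}.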
def pc_noutput_items_avg(self): <NEW_LINE> <INDENT> return _noaa_swig.hrpt_deframer_sptr_pc_noutput_items_avg(self)
pc_noutput_items_avg(hrpt_deframer_sptr self) -> float
625941b5498bea3a759b98a8
def create_type_from_user_string(self, typestr): <NEW_LINE> <INDENT> if '.' in typestr: <NEW_LINE> <INDENT> container = self._create_bare_container_type(typestr) <NEW_LINE> if container: <NEW_LINE> <INDENT> typeval = container <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> typeval = self._namespace.type_from_name(typestr) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> typeval = self.create_type_from_ctype_string(typestr) <NEW_LINE> <DEDENT> self.resolve_type(typeval) <NEW_LINE> if typeval.resolved: <NEW_LINE> <INDENT> typeval.ctype = None <NEW_LINE> <DEDENT> return typeval
Parse a C type string (as might be given from an annotation) and resolve it. For compatibility, we can consume both GI type string (utf8, Foo.Bar) style, as well as C (char *, FooBar) style. Note that type resolution may not succeed.
625941b5507cdc57c6306ac8
def output_csv_data(final_result_list_of_replicates, header_row, file_object): <NEW_LINE> <INDENT> f_out = file_object <NEW_LINE> f_out.write("{}\n".format(",".join([""] + header_row))) <NEW_LINE> for final_result_in_each_condition in final_result_list_of_replicates: <NEW_LINE> <INDENT> output_string_list = [final_result_in_each_condition[0]] + [ ",".join(["Replicate {}".format(index + 1)] + [str(flux) for flux in flux_list]) for index, flux_list in enumerate(final_result_in_each_condition[1:])] <NEW_LINE> f_out.write("\n".join(output_string_list + [""]))
Export all flux values into one sheet of a .csv file. The results calculated from the same condition will be put together. :param final_result_list_of_replicates: All flux values results. :param header_row: Flux name list. :param file_object: Output file object. :return:
625941b58c3a8732951581b3
def execute_via_app( self, tool, app, session_id, history_id, user=None, incoming = {}, set_output_hid = False, overwrite = True, history=None, job_params=None ): <NEW_LINE> <INDENT> for name, value in incoming.iteritems(): <NEW_LINE> <INDENT> if isinstance( value, app.model.HistoryDatasetAssociation ): <NEW_LINE> <INDENT> dataset = value <NEW_LINE> dataset_name = name <NEW_LINE> type = 'hda' <NEW_LINE> break <NEW_LINE> <DEDENT> elif isinstance( value, app.model.LibraryDatasetDatasetAssociation ): <NEW_LINE> <INDENT> dataset = value <NEW_LINE> dataset_name = name <NEW_LINE> type = 'ldda' <NEW_LINE> break <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise Exception( 'The dataset to set metadata on could not be determined.' ) <NEW_LINE> <DEDENT> <DEDENT> sa_session = app.model.context <NEW_LINE> job = app.model.Job() <NEW_LINE> job.session_id = session_id <NEW_LINE> job.history_id = history_id <NEW_LINE> job.tool_id = tool.id <NEW_LINE> if user: <NEW_LINE> <INDENT> job.user_id = user.id <NEW_LINE> <DEDENT> if job_params: <NEW_LINE> <INDENT> job.params = dumps( job_params ) <NEW_LINE> <DEDENT> start_job_state = job.state <NEW_LINE> try: <NEW_LINE> <INDENT> job.tool_version = tool.version <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> job.tool_version = "1.0.1" <NEW_LINE> <DEDENT> job.state = job.states.WAITING <NEW_LINE> job.set_handler(tool.get_job_handler( job_params )) <NEW_LINE> sa_session.add( job ) <NEW_LINE> sa_session.flush() <NEW_LINE> incoming[ '__ORIGINAL_DATASET_STATE__' ] = dataset.state <NEW_LINE> input_paths = [DatasetPath( dataset.id, real_path=dataset.file_name, mutable=False )] <NEW_LINE> external_metadata_wrapper = JobExternalOutputMetadataWrapper( job ) <NEW_LINE> cmd_line = external_metadata_wrapper.setup_external_metadata( dataset, sa_session, exec_dir = None, tmp_dir = app.config.new_file_path, dataset_files_path = app.model.Dataset.file_path, output_fnames = input_paths, config_root = app.config.root, config_file = app.config.config_file, datatypes_config = app.datatypes_registry.integrated_datatypes_configs, job_metadata = None, include_command = False, kwds = { 'overwrite' : overwrite } ) <NEW_LINE> incoming[ '__SET_EXTERNAL_METADATA_COMMAND_LINE__' ] = cmd_line <NEW_LINE> for name, value in tool.params_to_strings( incoming, app ).iteritems(): <NEW_LINE> <INDENT> job.add_parameter( name, value ) <NEW_LINE> <DEDENT> if type == 'hda': <NEW_LINE> <INDENT> job.add_input_dataset( dataset_name, dataset ) <NEW_LINE> <DEDENT> elif type == 'ldda': <NEW_LINE> <INDENT> job.add_input_library_dataset( dataset_name, dataset ) <NEW_LINE> <DEDENT> dataset._state = dataset.states.SETTING_METADATA <NEW_LINE> job.state = start_job_state <NEW_LINE> sa_session.flush() <NEW_LINE> app.job_queue.put( job.id, tool.id ) <NEW_LINE> dataset.datatype.before_setting_metadata( dataset ) <NEW_LINE> return job, odict()
Execute using application.
625941b5f548e778e58cd371
def system(command): <NEW_LINE> <INDENT> print('[system] {}'.format(command)) <NEW_LINE> p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) <NEW_LINE> output, err = p.communicate() <NEW_LINE> rc = p.returncode <NEW_LINE> if PY3: <NEW_LINE> <INDENT> output = output.decode("ascii") <NEW_LINE> err = err.decode("ascii") <NEW_LINE> <DEDENT> return rc, output, err
Returns (return-code, stdout, stderr)
625941b5cc0a2c11143dcc8e
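Typical use of the system() helper above (assuming it is in scope; the command is illustrative):

rc, out, err = system('echo hello')
if rc == 0:
    print(out.strip())   # hello
else:
    print('command failed:', err)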
def test_label_width_not_enough_single_field_rows(self): <NEW_LINE> <INDENT> name, *names = list(self.form.fields.keys()) <NEW_LINE> field_rows = [{name: self.form.fields[name]}] <NEW_LINE> if len(names) > 1: <NEW_LINE> <INDENT> double_row = {name: self.form.fields[name] for name in names[:2]} <NEW_LINE> field_rows.append(double_row) <NEW_LINE> <DEDENT> expected = {} <NEW_LINE> actual = self.form.determine_label_width(field_rows) <NEW_LINE> self.assertEqual(expected, actual)
The determine_label_width method returns empty values if there are not multiple single-field rows.
625941b53eb6a72ae02ec2cf
def upload_queue_dir(base_dir, bucket_name, project_name, new_cov_only=True): <NEW_LINE> <INDENT> queue_dir = os.path.join(base_dir, "queue") <NEW_LINE> queue_files = [] <NEW_LINE> for queue_file in os.listdir(queue_dir): <NEW_LINE> <INDENT> if not queue_file.startswith("id:"): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if new_cov_only and "+cov" not in queue_file: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if ",sync:" in queue_file: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> queue_files.append(queue_file) <NEW_LINE> <DEDENT> cmdline_file = os.path.join(base_dir, "cmdline") <NEW_LINE> conn = S3Connection() <NEW_LINE> bucket = conn.get_bucket(bucket_name) <NEW_LINE> remote_path = "%s/queues/%s/" % (project_name, get_machine_id(base_dir)) <NEW_LINE> remote_files = [key.name.replace(remote_path, "", 1) for key in list(bucket.list(remote_path))] <NEW_LINE> upload_list = [] <NEW_LINE> for queue_file in queue_files: <NEW_LINE> <INDENT> if queue_file not in remote_files: <NEW_LINE> <INDENT> upload_list.append(os.path.join(queue_dir, queue_file)) <NEW_LINE> <DEDENT> <DEDENT> if "cmdline" not in remote_files: <NEW_LINE> <INDENT> upload_list.append(cmdline_file) <NEW_LINE> <DEDENT> for upload_file in upload_list: <NEW_LINE> <INDENT> remote_key = Key(bucket) <NEW_LINE> remote_key.name = remote_path + os.path.basename(upload_file) <NEW_LINE> print("Uploading file %s -> %s" % (upload_file, remote_key.name)) <NEW_LINE> remote_key.set_contents_from_filename(upload_file)
Synchronize the queue directory of the specified AFL base directory to the specified S3 bucket. This method only uploads files that don't exist yet on the receiving side. @type base_dir: String @param base_dir: AFL base directory @type bucket_name: String @param bucket_name: Name of the S3 bucket to use @type project_name: String @param project_name: Name of the project folder inside the S3 bucket @type new_cov_only: Boolean @param new_cov_only: Only upload files that have new coverage
625941b526068e7796caeace
@collect_auth <NEW_LINE> def redirect_unsupported_institution(auth): <NEW_LINE> <INDENT> cas_unsupp_inst_url = cas.get_login_url('', campaign='unsupportedinstitution') <NEW_LINE> if auth.logged_in: <NEW_LINE> <INDENT> return auth_logout(redirect_url=cas_unsupp_inst_url) <NEW_LINE> <DEDENT> return redirect(cas_unsupp_inst_url)
Sends user back to the "Unsupported Institution" page on CAS. Logs user out if they are already logged in. HTTP Method: GET :param auth: the authentication context :return
625941b521a7993f00bc7ade
@blueprint.route('/', methods=['GET']) <NEW_LINE> @require_logged_in_user <NEW_LINE> def view(_user): <NEW_LINE> <INDENT> from pyaspora.diaspora.models import MessageQueue <NEW_LINE> if MessageQueue.has_pending_items(_user): <NEW_LINE> <INDENT> return redirect(url_for('diaspora.run_queue', _external=True)) <NEW_LINE> <DEDENT> limit = int(request.args.get('limit', 10)) <NEW_LINE> friend_ids = [f.id for f in _user.contact.friends()] <NEW_LINE> clauses = [Post.Queries.shared_with_contact(_user.contact)] <NEW_LINE> if friend_ids: <NEW_LINE> <INDENT> clauses.append( Post.Queries.authored_by_contacts_and_public(friend_ids)) <NEW_LINE> <DEDENT> tag_ids = [t.id for t in _user.contact.interests] <NEW_LINE> if tag_ids: <NEW_LINE> <INDENT> clauses.append(Tag.Queries.public_posts_for_tags(tag_ids)) <NEW_LINE> <DEDENT> feed_query = or_(*clauses) <NEW_LINE> my_share = aliased(Share) <NEW_LINE> feed = db.session.query(Share).join(Post). outerjoin( my_share, and_( Post.id == my_share.post_id, my_share.contact == _user.contact ) ). outerjoin(PostTag).outerjoin(Tag). filter(feed_query). filter(or_(my_share.hidden == None, not_(my_share.hidden))). filter(Post.parent == None). order_by(desc(Post.thread_modified_at)). group_by(Post.id). options(contains_eager(Share.post)). options(joinedload(Share.post, Post.diasp)). limit(limit) <NEW_LINE> data = { 'feed': json_posts([(s.post, s) for s in feed], _user, True), 'limit': limit, 'actions': {}, } <NEW_LINE> if len(data['feed']) >= limit: <NEW_LINE> <INDENT> data['actions']['more'] = url_for('feed.view', limit=limit + 10, _external=True) <NEW_LINE> <DEDENT> add_logged_in_user_to_data(data, _user) <NEW_LINE> return render_response('feed.tpl', data)
Show the logged-in user their own feed.
625941b56fece00bbac2d530
def reset(self): <NEW_LINE> <INDENT> self.reward = 0 <NEW_LINE> self.error = 0 <NEW_LINE> self.h = [] <NEW_LINE> for i in range(self.S): <NEW_LINE> <INDENT> self.h = np.append(self.h, 1) <NEW_LINE> <DEDENT> self.SNR_s = [] <NEW_LINE> self.SNR = [] <NEW_LINE> for i in range(self.S): <NEW_LINE> <INDENT> self.SNR_s = np.append(self.SNR_s, self.SNR_avg[i]) <NEW_LINE> <DEDENT> for i in range(self.W): <NEW_LINE> <INDENT> self.SNR = np.concatenate((self.SNR, self.SNR_s), axis=0) <NEW_LINE> <DEDENT> self.SNR = self.SNR.reshape((self.W, self.S)) <NEW_LINE> self.tasks_prev = [] <NEW_LINE> self.tasks_prev_s = [] <NEW_LINE> for i in range(self.S): <NEW_LINE> <INDENT> self.tasks_prev_s = np.append(self.tasks_prev_s, 0) <NEW_LINE> <DEDENT> for i in range(self.W - 1 + self.CSI): <NEW_LINE> <INDENT> self.tasks_prev = np.concatenate((self.tasks_prev, self.tasks_prev_s), axis=0) <NEW_LINE> <DEDENT> self.tasks_prev = self.tasks_prev.reshape((self.W - 1 + self.CSI, self.S)) <NEW_LINE> if self.CSI == 1: <NEW_LINE> <INDENT> self.state = np.log10(self.SNR) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.state = np.concatenate((np.log10(self.SNR[:-1]), self.tasks_prev/(10**9)), axis=0)
Resets the reward and error variables to zero and restores the SNR and workload state. :return:
625941b5a8ecb033257d2ecc
def recv(self, tr, host_file): <NEW_LINE> <INDENT> def _recv(): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> fm = FileReciever(host_file) <NEW_LINE> job = self.e_recv.submit(_recv) <NEW_LINE> return job
Search for senders - cancel if none needed - wait x seconds for response
625941b58da39b475bd64d6c
def test_create_done_several_follows_obliviate(self): <NEW_LINE> <INDENT> for record in self.avatars: <NEW_LINE> <INDENT> record.state = 'present' <NEW_LINE> <DEDENT> Operation = self.registry.Wms.Operation <NEW_LINE> other_reason = Operation.Arrival.insert(physobj_type=self.physobj_type, location=self.loc, state='done', dt_execution=self.dt_test1, quantity=35) <NEW_LINE> self.avatars[0].outcome_of = other_reason <NEW_LINE> agg = self.Agg.create(inputs=self.avatars, state='done', dt_execution=self.dt_test2) <NEW_LINE> self.assertEqual(set(agg.follows), set((self.arrivals[1], other_reason))) <NEW_LINE> agg.obliviate() <NEW_LINE> new_avatars = self.PhysObj.Avatar.query().all() <NEW_LINE> self.assertEqual(len(new_avatars), 2) <NEW_LINE> for avatar in new_avatars: <NEW_LINE> <INDENT> if avatar.obj.quantity == 1: <NEW_LINE> <INDENT> exp_reason = other_reason <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> exp_reason = self.arrivals[1] <NEW_LINE> <DEDENT> self.assertEqual(avatar.outcome_of, exp_reason) <NEW_LINE> <DEDENT> self.assertEqual(Operation.HistoryInput.query().count(), 0)
Test that oblivion doesn't shuffle original reasons. TODO this test has one chance over 2 to pass by accident. make a better one, and label it as a MultiplePhysObj mixin test, by issuing more goods and reasons and pairing them randomly so that chances of passing by coincidence are really low. TODO now that ``reason`` has been changed to ``outcome_of`` which isn't mutated after creation, consider removing this test altogether
625941b5d10714528d5ffad5
def orangesRotting(self, grid): <NEW_LINE> <INDENT> que = [] <NEW_LINE> len_x,len_y = len(grid),len(grid[0]) <NEW_LINE> for i in range(len_x): <NEW_LINE> <INDENT> for j in range(len_y): <NEW_LINE> <INDENT> if grid[i][j]==2: <NEW_LINE> <INDENT> que.append([i,j]) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> count = 0 <NEW_LINE> while(1): <NEW_LINE> <INDENT> sub = [] <NEW_LINE> for n in que: <NEW_LINE> <INDENT> list_my = self.find(n,len_x,len_y) <NEW_LINE> for m in list_my: <NEW_LINE> <INDENT> if grid[m[0]][m[1]]==1: <NEW_LINE> <INDENT> grid[m[0]][m[1]]=2 <NEW_LINE> sub.append(m) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if sub==[]:break <NEW_LINE> que=sub <NEW_LINE> count+=1 <NEW_LINE> <DEDENT> for i in range(len_x): <NEW_LINE> <INDENT> for j in range(len_y): <NEW_LINE> <INDENT> if grid[i][j]==1: <NEW_LINE> <INDENT> return -1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return count
:type grid: List[List[int]] :rtype: int
625941b5baa26c4b54cb0f19
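A worked example for orangesRotting above (0 = empty, 1 = fresh, 2 = rotten), assuming the method sits on a class that also provides the find() neighbour helper it calls:

grid = [[2, 1, 1],
        [1, 1, 0],
        [0, 1, 1]]
# BFS frontier per minute: {(0,0)} -> {(0,1),(1,0)} -> {(0,2),(1,1)} -> {(2,1)} -> {(2,2)}
# Every fresh orange rots after 4 minutes, so the method returns 4.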
def __init__(self): <NEW_LINE> <INDENT> print("creating grid") <NEW_LINE> self.squares = [ [self._get_a_placeholder() for _ in range(GRID_WIDTH)] for _ in range(GRID_HEIGHT - 4) ] + [ [EMPTY for _ in range(GRID_WIDTH)] for _ in range(4) ] <NEW_LINE> self.next_row = [self._get_a_placeholder() for _ in range(GRID_WIDTH)] <NEW_LINE> self._deleted_rows = []
Constructor for the grid. The first lines of squares are the upper ones, the last are the lower ones. There is a structure for next row, which is separate from the grid
625941b5adb09d7d5db6c589
def move_nanmean(arr, window, axis=-1, method='loop'): <NEW_LINE> <INDENT> arr = np.array(arr, copy=False) <NEW_LINE> if method == 'filter': <NEW_LINE> <INDENT> y = move_nanmean_filter(arr, window, axis=axis) <NEW_LINE> <DEDENT> elif method == 'strides': <NEW_LINE> <INDENT> y = move_func_strides(bn.slow.nanmean, arr, window, axis=axis) <NEW_LINE> <DEDENT> elif method == 'loop': <NEW_LINE> <INDENT> y = move_func_loop(bn.slow.nanmean, arr, window, axis=axis) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> msg = "`method` must be 'filter', 'strides', or 'loop'." <NEW_LINE> raise ValueError(msg) <NEW_LINE> <DEDENT> if y.dtype != arr.dtype: <NEW_LINE> <INDENT> if issubclass(arr.dtype.type, np.inexact): <NEW_LINE> <INDENT> y = y.astype(arr.dtype) <NEW_LINE> <DEDENT> <DEDENT> return y
Slow move_nanmean for unaccelerated ndim/dtype combinations. Parameters ---------- arr : array_like Input array. window : int The number of elements in the moving window. axis : int, optional The axis over which to perform the moving mean. By default the moving mean is taken over the last axis (-1). method : str, optional The following moving window methods are available: ========== ===================================== 'filter' scipy.ndimage.convolve1d 'strides' strides tricks 'loop' brute force python loop (default) ========== ===================================== Returns ------- y : ndarray The moving mean of the input array along the specified axis, ignoring NaNs. (A window with all NaNs returns NaN for the window mean.) The output has the same shape as the input. Examples -------- >>> arr = np.array([1, 2, np.nan, 4]) >>> bn.slow.move_nanmean(arr, window=2, axis=0) array([ NaN, 1.5, 2. , 4. ])
625941b50a50d4780f666c85
def commentOKPYSubmit(notebook_data, mode="comment"): <NEW_LINE> <INDENT> for cell in notebook_data["cells"]: <NEW_LINE> <INDENT> source = cell["source"] <NEW_LINE> for index in reversed(range(len(source))): <NEW_LINE> <INDENT> if "ok.submit" in source[index]: <NEW_LINE> <INDENT> if mode == "comment": <NEW_LINE> <INDENT> source[index] = "# Removed by AutoGrader" + source[index] <NEW_LINE> <DEDENT> elif mode == "delete": <NEW_LINE> <INDENT> del source[index] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return notebook_data
Comments out any lines that contain ok.submit() to prevent automatic resubmission by students; change mode to 'delete' in order to remove the line instead
625941b5a79ad161976cbf3b
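What commentOKPYSubmit does to a parsed notebook; nb below follows the standard .ipynb JSON layout:

nb = {'cells': [{'source': ['import ok\n', 'ok.submit()\n', "print('done')\n"]}]}
# commentOKPYSubmit(nb) prefixes the offending line with '# Removed by AutoGrader',
# turning it into '# Removed by AutoGraderok.submit()\n'; mode='delete' drops it instead.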
def stencil( x=0,y=0,width=-1,height=-1, normalise = 1, flip = 1, ): <NEW_LINE> <INDENT> glPixelStorei(GL_PACK_ALIGNMENT, 1) <NEW_LINE> data = glReadPixels(0, 0, width, height, GL_STENCIL_INDEX, GL_UNSIGNED_BYTE) <NEW_LINE> if flip: <NEW_LINE> <INDENT> data = list(data) <NEW_LINE> data.reverse() <NEW_LINE> <DEDENT> data = ravel(data) <NEW_LINE> if normalise and data: <NEW_LINE> <INDENT> data = data.astype('f') <NEW_LINE> diff = float(max(data)) - float(min(data)) <NEW_LINE> data = (data - min(data)) * (255.0/(diff or 1.0)) <NEW_LINE> <DEDENT> data = data.astype( 'b' ) <NEW_LINE> image = Image.fromstring( "L", (width, height), data ) <NEW_LINE> if flip: <NEW_LINE> <INDENT> image = image.transpose( Image.FLIP_TOP_BOTTOM) <NEW_LINE> <DEDENT> return image
Get the stencil-buffer as a PIL image x,y -- start position for the captured rectangle width,height -- size of the captured rectangle if normalise is true, the image will be scaled so that the largest stencil values are white and the smallest are black. if flip, then the image will be flipped vertically so that it matches the PIL conventions instead of the OpenGL conventions.
625941b563d6d428bbe442e5
def set_lcao(self, mode='last'): <NEW_LINE> <INDENT> pdfparam = self._pdfparam <NEW_LINE> entry = self._entry <NEW_LINE> iterations = pdfparam.iterations <NEW_LINE> start_itr = 1 <NEW_LINE> if mode == 'last': <NEW_LINE> <INDENT> start_itr = iterations <NEW_LINE> <DEDENT> m = pdf.Matrix() <NEW_LINE> for itr in range(start_itr, iterations + 1): <NEW_LINE> <INDENT> for runtype in pdfparam.runtypes(): <NEW_LINE> <INDENT> path = pdfparam.get_c_mat_path(runtype, itr) <NEW_LINE> self._logger.debug('load %s' % (path)) <NEW_LINE> if m.is_loadable(path): <NEW_LINE> <INDENT> m.load(path) <NEW_LINE> entry.set_lcao(runtype, itr, m)
Store the LCAO matrices in the DB TODO: implement `mode`
625941b530c21e258bdfa293
def model(self, model): <NEW_LINE> <INDENT> if model is not None: <NEW_LINE> <INDENT> model = weakref.ref(model) <NEW_LINE> <DEDENT> return self.execution_options(model=model)
Shortcut to set execution option ``model`` in a chaining call. Read :meth:`~gino.engine.GinoConnection.execution_options` for more information.
625941b550812a4eaa59c11c
def get_solutions_results_table(self, fields=False, max_number=None, orderby=False): <NEW_LINE> <INDENT> self.check_if_is_connected() <NEW_LINE> log.info("Getting %s from solutions", fields) <NEW_LINE> f = self.get_fields_string(fields) <NEW_LINE> sql_command = "SELECT %s FROM %s " % (f, self.results_table) <NEW_LINE> if orderby: <NEW_LINE> <INDENT> sql_command += " ORDER BY %s ASC" % orderby <NEW_LINE> <DEDENT> if max_number not in (None, False): <NEW_LINE> <INDENT> sql_command += " LIMIT %d" % (max_number) <NEW_LINE> <DEDENT> log.debug("Using %s", sql_command) <NEW_LINE> data = self.retrieve_data(sql_command) <NEW_LINE> return data
Recovers solutions @param fields Fields to recover from the table @param max_number Maximum number of solutions to recover @param orderby Name of the restraint used for sorting the states
625941b582261d6c526ab299
def run_pipeline(self, **kwargs): <NEW_LINE> <INDENT> df_train = self.generate_random_data() <NEW_LINE> self.write_csv_s3(df_train, "train.csv", **kwargs)
Run the entire data cleansing pipeline :return:
625941b5167d2b6e31218994
def evolve(self): <NEW_LINE> <INDENT> possibleEvolutions = [] <NEW_LINE> for nodeT in nodeTypes: <NEW_LINE> <INDENT> possibleEvolutions += nodeT.create_basic(self.parent) <NEW_LINE> <DEDENT> return possibleEvolutions
:return: A list of all the nodes this could evolve into
625941b5baa26c4b54cb0f1a
def get_dominant_frequency(self, frame_number): <NEW_LINE> <INDENT> samples_per_frame = int(self.sample_rate / self.video.fps) <NEW_LINE> frame_samples = self.audio[frame_number * samples_per_frame : (frame_number + 1) * samples_per_frame] <NEW_LINE> if TESTING: <NEW_LINE> <INDENT> print('Starting FFT processing at time: ' + str(time.time())) <NEW_LINE> <DEDENT> w = np.fft.fft(frame_samples) <NEW_LINE> freqs = np.fft.fftfreq(len(w)) <NEW_LINE> idx = np.argmax(np.abs(w)) <NEW_LINE> freq = freqs[idx] <NEW_LINE> freq_in_hertz = abs(freq * self.sample_rate) <NEW_LINE> if TESTING: <NEW_LINE> <INDENT> print('Finishing FFT processing at time: ' + str(time.time())) <NEW_LINE> <DEDENT> return freq_in_hertz
Returns the dominant audio frequency of a given frame in hertz.
625941b5925a0f43d2549c69
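The FFT step at the heart of get_dominant_frequency, demonstrated on a synthetic 440 Hz tone with no video dependency (the sample rate is an assumption for the demo):

import numpy as np

sample_rate = 44100
t = np.arange(sample_rate) / sample_rate
samples = np.sin(2 * np.pi * 440 * t)      # one second of a 440 Hz sine

w = np.fft.fft(samples)
freqs = np.fft.fftfreq(len(w))             # frequencies in cycles per sample
idx = np.argmax(np.abs(w))
print(abs(freqs[idx] * sample_rate))       # ~440.0 Hz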
def confd_state_internal_callpoints_authentication_callback_registration_type_range_range_lower(self, **kwargs): <NEW_LINE> <INDENT> config = ET.Element("config") <NEW_LINE> confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring") <NEW_LINE> internal = ET.SubElement(confd_state, "internal") <NEW_LINE> callpoints = ET.SubElement(internal, "callpoints") <NEW_LINE> authentication_callback = ET.SubElement(callpoints, "authentication-callback") <NEW_LINE> registration_type = ET.SubElement(authentication_callback, "registration-type") <NEW_LINE> range = ET.SubElement(registration_type, "range") <NEW_LINE> range = ET.SubElement(range, "range") <NEW_LINE> lower = ET.SubElement(range, "lower") <NEW_LINE> lower.text = kwargs.pop('lower') <NEW_LINE> callback = kwargs.pop('callback', self._callback) <NEW_LINE> return callback(config)
Auto Generated Code
625941b5627d3e7fe0d68c44
def execRunCmd_multi(proc): <NEW_LINE> <INDENT> if is_stop() is True: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> proc_name = proc["jar"].split("/") <NEW_LINE> id = start_proc_logs(proc_name[len(proc_name) - 1], proc["desc"]) <NEW_LINE> start = int(time.time()) <NEW_LINE> def succ(pid, returncode, outs, errs): <NEW_LINE> <INDENT> LOGGER.info("exec_cmd pid:%s, returncode:%s" % (pid, returncode)) <NEW_LINE> LOGGER.info("exec_cmd outs: %s" % outs) <NEW_LINE> LOGGER.info("exec_cmd errs: %s" % errs) <NEW_LINE> end_proc_logs(id, start, 2) <NEW_LINE> <DEDENT> def fail(pid, returncode, outs, errs): <NEW_LINE> <INDENT> LOGGER.info("exec_cmd pid:%s, returncode:%s" % (pid, returncode)) <NEW_LINE> LOGGER.info("exec_cmd outs: %s" % outs) <NEW_LINE> LOGGER.info("exec_cmd errs: %s" % errs) <NEW_LINE> if returncode == -9: <NEW_LINE> <INDENT> end_proc_logs(id, start, 4) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> end_proc_logs(id, start, 3) <NEW_LINE> <DEDENT> <DEDENT> def before(pid): <NEW_LINE> <INDENT> update_proc_logs(id, pid) <NEW_LINE> <DEDENT> proc["app_name"] = proc["app_name"] + "_" + id <NEW_LINE> update_proc_logs_appname(id, proc["app_name"]) <NEW_LINE> try: <NEW_LINE> <INDENT> returncode = ProcUtil().single_pro(get_cmd(proc), succ, fail, before) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> LOGGER.info("error: %s" % e) <NEW_LINE> returncode = 1 <NEW_LINE> end_proc_logs(id, start, 3) <NEW_LINE> <DEDENT> return returncode
Main method for serial execution
625941b5a934411ee3751491
def get_density(point, data, bandwidth=0.1): <NEW_LINE> <INDENT> kde = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(data) <NEW_LINE> return kde.score_samples(np.array([point]))[0]
This function returns the log-density of the data at the given point (as returned by score_samples), using a Gaussian kernel density estimate :param point: A numpy array vector specifying a point in space to evaluate the density at :param data: A 2D numpy array of points (rows) :param bandwidth: kernel bandwidth, default 0.1 :return: A float representing the log-density of data-points at the given point
625941b54d74a7450ccd3fb9
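Usage of the KDE helper above, showing that score_samples yields a log-density (exponentiate to get the density itself):

import numpy as np
from sklearn.neighbors import KernelDensity

data = np.random.default_rng(0).normal(size=(200, 1))
kde = KernelDensity(kernel='gaussian', bandwidth=0.1).fit(data)
log_density = kde.score_samples(np.array([[0.0]]))[0]
print(np.exp(log_density))   # density estimate at x = 0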
def __eq__(self, other): <NEW_LINE> <INDENT> if not isinstance(other, ArrayGetResponse): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return self.__dict__ == other.__dict__
Returns true if both objects are equal
625941b54428ac0f6e5ba5f0
def namespace_term_counts(): <NEW_LINE> <INDENT> size = 100 <NEW_LINE> search_body = { "aggs": {"namespace_term_counts": {"terms": {"field": "namespace", "size": size}}} } <NEW_LINE> try: <NEW_LINE> <INDENT> results = es.search( index=settings.TERMS_INDEX, doc_type=settings.TERMS_DOCUMENT_TYPE, body=search_body, size=0, ) <NEW_LINE> results = results["aggregations"]["namespace_term_counts"]["buckets"] <NEW_LINE> return [{"namespace": r["key"], "count": r["doc_count"]} for r in results] <NEW_LINE> <DEDENT> except elasticsearch.ConnectionError as e: <NEW_LINE> <INDENT> logger.exception("Elasticsearch connection error", error=str(e)) <NEW_LINE> return None
Generate counts of each namespace in terms index This function is at least used in the /status endpoint to show how many terms are in each namespace and what namespaces are available. Returns: List[Mapping[str, int]]: array of namespace vs counts
625941b56fece00bbac2d531
def get_backup_databases(cinder_config=False): <NEW_LINE> <INDENT> REGION_LOCAL_DATABASES = ('postgres', 'template1', 'nova', 'sysinv', 'neutron', 'heat', 'nova_api', 'aodh', 'murano', 'magnum', 'panko', 'ironic', 'nova_cell0', 'gnocchi', 'fm', 'barbican') <NEW_LINE> REGION_SHARED_DATABASES = ('glance', 'keystone') <NEW_LINE> if cinder_config: <NEW_LINE> <INDENT> REGION_SHARED_DATABASES += ('cinder', ) <NEW_LINE> <DEDENT> DB_TABLE_SKIP_MAPPING = { 'fm': ('alarm',), 'gnocchi': ('metric', 'resource'), 'dcorch': ('orch_job', 'orch_request', 'resource', 'subcloud_resource'), } <NEW_LINE> if tsconfig.region_config == 'yes': <NEW_LINE> <INDENT> BACKUP_DATABASES = REGION_LOCAL_DATABASES <NEW_LINE> shared_services = sysinv.get_shared_services() <NEW_LINE> for service_type in ["image", "volume"]: <NEW_LINE> <INDENT> if service_type not in shared_services: <NEW_LINE> <INDENT> service = 'glance' if service_type == "image" else 'cinder' <NEW_LINE> BACKUP_DATABASES += (service, ) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> BACKUP_DATABASES = REGION_LOCAL_DATABASES + REGION_SHARED_DATABASES <NEW_LINE> if tsconfig.distributed_cloud_role == sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER: <NEW_LINE> <INDENT> BACKUP_DATABASES += ('dcmanager', 'dcorch') <NEW_LINE> <DEDENT> <DEDENT> BACKUP_DB_SKIP_TABLES = dict( [[x, DB_TABLE_SKIP_MAPPING.get(x, ())] for x in BACKUP_DATABASES]) <NEW_LINE> return BACKUP_DATABASES, BACKUP_DB_SKIP_TABLES
Retrieve database lists for backup. :return: backup_databases and backup_database_skip_tables
625941b524f1403a92600960
def __repr__(self): <NEW_LINE> <INDENT> return '<Twilio.Preview.Understand.FieldPage>'
Provide a friendly representation :returns: Machine friendly representation :rtype: str
625941b5435de62698dfda4b
def __reduceTransEffect(self,PV,OP): <NEW_LINE> <INDENT> self.deriv = PV <NEW_LINE> self.prevErr = self.cfg['setPoint'] - PV <NEW_LINE> if self.cfg["ctrlType"] == "P": <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> elif self.cfg["ctrlType"] == "PI" or self.cfg["ctrlType"] == "PID": <NEW_LINE> <INDENT> return np.around(((self.cfg['Ki']/self.cfg['interval'])*((OP/self.cfg['Kg'])-(self.cfg['setPoint'] - PV))),0) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError('Invalid Control Type - Options are P, PI & PID')
Calculate the value for spErr (set point error) to ensure bumpless transfer during controller changeover. Note that deriv and prevErr are also set. :param PV: Process variable at current time :param OP: Valve operating point :type PV: float :type OP: float :return: Value for spErr
625941b592d797404e303f81
def orbital_pieces_number(self, piece_diameter): <NEW_LINE> <INDENT> contour_length = self.get_elipse_interpolation().length() <NEW_LINE> return int(contour_length / (piece_diameter + self.clearence))
Calculates how many pieces are to be placed in the orbital contour
625941b585dfad0860c3ac4f
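The spacing arithmetic in orbital_pieces_number, worked for a circular contour (all numbers are illustrative; 'clearence' follows the source's spelling):

import math

contour_length = 2 * math.pi * 50          # circumference of a radius-50 orbit
piece_diameter, clearence = 8, 2
print(int(contour_length / (piece_diameter + clearence)))   # 31 pieces fit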
def MEDnLink(*args, **kwargs): <NEW_LINE> <INDENT> return _medlink.MEDnLink(*args, **kwargs)
MEDnLink(fid) -> med_int Parameters: fid: med_idt const
625941b5f7d966606f6a9dff
def get_pokemon(pokemon_id): <NEW_LINE> <INDENT> return 'do some magic!'
get a specific pokemon by id get a pokemon using their unique id # noqa: E501 :param pokemon_id: it's the pokemon id :type pokemon_id: int :rtype: Pokemon
625941b57b25080760e39252
def find_dirs(start_dir, creds, include=None, exclude=None): <NEW_LINE> <INDENT> return find(start_dir, creds, include=include, exclude=exclude, ftype='d')
Returns a list of directories matching the patterns in the include list
625941b567a9b606de4a7cb3
def define_stimuli(self): <NEW_LINE> <INDENT> dev1 = pygame.Surface(self.size) <NEW_LINE> dev2 = pygame.Surface(self.size) <NEW_LINE> std1 = pygame.Surface(self.size) <NEW_LINE> std2 = pygame.Surface(self.size) <NEW_LINE> red, orange, blue, yellow = (255,0,0), (255,100,0), (0,0,255), (255,255,0) <NEW_LINE> dev1.fill(red) <NEW_LINE> dev2.fill(orange) <NEW_LINE> std1.fill(blue) <NEW_LINE> std2.fill(yellow) <NEW_LINE> return [std1,std2], [dev1,dev2]
Creates standard and deviant stimuli.
625941b567a9b606de4a7cb4
def initialization(self, which_sources): <NEW_LINE> <INDENT> return TRIVARIATE_UNQ.initialization(self, which_sources)
Initialize the data for the triplets (T,V,W) where V,W in {X,Y,Z} Args: which_sources: list(int) - [1,2] if sources are X and Y [1,3] if sources are X and Z [2,3] if sources are Y and Z Returns: (if [1,2] v,w=x,y|if [1,3] v,w=x,z|if [2,3] v,w=y,z|) dictionary keys: (t,v,w) values: their indices list of (t,v,w)
625941b5851cf427c661a312
def psi_H_Li_Br_PK74(T, P): <NEW_LINE> <INDENT> psi = 0.0 <NEW_LINE> valid = np.isclose(T, 298.15, **temperature_tol) <NEW_LINE> return psi, valid
c-c'-a: hydrogen lithium bromide [PK74].
625941b5009cb60464c631b4
def testFieldDescriptorGetSet(self): <NEW_LINE> <INDENT> object = FieldTest() <NEW_LINE> self.failUnless(FieldTest.PublicStaticField == 0) <NEW_LINE> self.failUnless(object.PublicStaticField == 0) <NEW_LINE> descriptor = FieldTest.__dict__['PublicStaticField'] <NEW_LINE> self.failUnless(type(descriptor) != types.IntType) <NEW_LINE> object.PublicStaticField = 0 <NEW_LINE> descriptor = FieldTest.__dict__['PublicStaticField'] <NEW_LINE> self.failUnless(type(descriptor) != types.IntType) <NEW_LINE> FieldTest.PublicStaticField = 0 <NEW_LINE> descriptor = FieldTest.__dict__['PublicStaticField'] <NEW_LINE> self.failUnless(type(descriptor) != types.IntType)
Test field descriptor get / set.
625941b516aa5153ce36226e
def all_isinstance(iterable, t): <NEW_LINE> <INDENT> return all_iterable(iterable, lambda x: isinstance(x, t))
Check whether each object in an iterable is an instance of a given type
625941b58a43f66fc4b53e60
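Usage of all_isinstance above, with a plain-Python stand-in for the all_iterable helper it delegates to (the real helper's definition is not shown in the source):

def all_iterable(iterable, pred):
    # assumed behaviour: True when pred holds for every element
    return all(pred(x) for x in iterable)

def all_isinstance(iterable, t):
    return all_iterable(iterable, lambda x: isinstance(x, t))

print(all_isinstance([1, 2, 3], int))    # True
print(all_isinstance([1, '2'], int))     # False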
def _pr_convergence_tf(self, convergence: float, topics: List[int] = None, c_criterion=ConvergenceCriterion.ONE) -> tf.Tensor: <NEW_LINE> <INDENT> raise NotImplementedError( 'subclasses must override page_rank_until_convergence()!')
Abstract method to implement a iterative version of PageRank until convergence rate. This method runs the PageRank algorithm in iterative fashion a undetermined number of times bounded by the `convergence` rate and the 'c_criterion' criterion. Args: convergence (float): A float between 0 and 1 that represents the convergence rate that allowed to finish the iterative implementations of the algorithm to accept the solution. Default to `1.0`. topics (:obj:`list` of :obj:`int`, optional): A list of integers that represent the set of vertex where the random jumps arrives. If this parameter is used, the uniform distribution over all vertices of the random jumps will be modified to jump only to this vertex set. Default to `None`. c_criterion (:obj:`function`, optional): The function used to calculate if the Convergence Criterion of the iterative implementations is reached. Default to `tf_G.ConvergenceCriterion.ONE`. Returns: (:obj:`tf.Tensor`): A 1-D `tf.Tensor` of [n] shape, where `n` is the cardinality of the graph vertex set. It contains the normalized rank of vertex `i` at position `i`.
625941b5236d856c2ad445d6
def __load_variable_combobox(self): <NEW_LINE> <INDENT> self.project.network_model.load_dictionaries() <NEW_LINE> items = self.project.network_model.get_sorted_variables() <NEW_LINE> if items is None: <NEW_LINE> <INDENT> messagebox = QTranusMessageBox.set_new_message_box(QtWidgets.QMessageBox.Warning, "Variables", "There are no variables to load.", ":/plugins/QTranus/icon.png", self, buttons = QtWidgets.QMessageBox.Ok) <NEW_LINE> messagebox.exec_() <NEW_LINE> print ("There are no variables to load.") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.variablesList.addItems(items)
@summary: Loads data to variable combo-box control
625941b58a43f66fc4b53e61
def inject(module, entrance, host, port, inject_args): <NEW_LINE> <INDENT> encoding = inject_args.get('encoding', 'utf-8') <NEW_LINE> code = create_payload(module, entrance, host, port, encoding) <NEW_LINE> inject_args['code'] = code <NEW_LINE> success = _inject(**inject_args) <NEW_LINE> if not success: <NEW_LINE> <INDENT> exit(1)
Inject the process with module.entrance
625941b5099cdd3c635f0a53
def getObject(self, resource): <NEW_LINE> <INDENT> resource = self.parseUrl(resource, 'objects') <NEW_LINE> res = self.getRequest(resource) <NEW_LINE> obj = vsdModels.APIObject._create(res) <NEW_LINE> return obj
retrieve an object based on the objectID/selfUrl :param int,str resource: (str) selfUrl of the object or the (int) object ID :return: the object :rtype: APIObject (or derived class)
625941b515fb5d323cde08ff
def getTarget(self): <NEW_LINE> <INDENT> return self.__target
Get the name of the output Returns ------- str name of the target
625941b5ab23a570cc24ff77
def wait_for_appearance(self, timeout=UI_TIME_OUT): <NEW_LINE> <INDENT> self.proxy.wait_for_appearance(timeout)
Wait for the element to appear. :param timeout: timeout in seconds :return: Waits for the element to appear, with a default timeout of 120 seconds. Airtest raises TargetNotFoundError when the object is not found, while Poco raises PocoTargetTimeout. Platform support: - [x] android - [x] ios
625941b5090684286d50ead6
def check_events(ai_settings, screen, ship, bullets): <NEW_LINE> <INDENT> for event in pygame.event.get(): <NEW_LINE> <INDENT> if event.type == pygame.QUIT: <NEW_LINE> <INDENT> sys.exit() <NEW_LINE> <DEDENT> elif event.type == pygame.KEYDOWN: <NEW_LINE> <INDENT> check_keydown_events(event, ai_settings, screen, ship, bullets) <NEW_LINE> <DEDENT> elif event.type == pygame.KEYUP: <NEW_LINE> <INDENT> check_keyup_events(event, ship)
Detect events and respond to them.
625941b58a349b6b435e7f6b
def assert_methods_match(first, second): <NEW_LINE> <INDENT> first_methods = dict(inspect.getmembers(first, inspect.ismethod)) <NEW_LINE> second_methods = dict(inspect.getmembers(second, inspect.ismethod)) <NEW_LINE> for name, first_method in iter_items(first_methods): <NEW_LINE> <INDENT> if name.startswith("_"): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> first_argspec = inspect.getargspec(first_method) <NEW_LINE> first_formatted = inspect.formatargspec(*first_argspec) <NEW_LINE> assert name in second_methods <NEW_LINE> second_method = second_methods.get(name) <NEW_LINE> second_argspec = inspect.getargspec(second_method) <NEW_LINE> assert first_argspec == second_argspec
Assert that public methods in C{first} are present in C{second}. This helper function asserts that all public methods found in C{first} are also present in C{second} and accept the same arguments. C{first} may have its own private methods, though, and may not have all methods found in C{second}. Note that if a private method in C{first} matches the name of one in C{second}, their specification is still compared. This is useful to verify if a fake or stub class have the same API as the real class being simulated.
625941b5ec188e330fd5a59f
def fillObsCPT(bayesNet, gameState): <NEW_LINE> <INDENT> bottomLeftPos, topLeftPos, bottomRightPos, topRightPos = gameState.getPossibleHouses() <NEW_LINE> "*** YOUR CODE HERE ***" <NEW_LINE> def getAssignment(x,y): <NEW_LINE> <INDENT> height = gameState.data.layout.height / 2.0 <NEW_LINE> width = gameState.data.layout.width / 2.0 <NEW_LINE> if x > width and y > height : <NEW_LINE> <INDENT> return TOP_RIGHT_VAL <NEW_LINE> <DEDENT> elif x > width and y < height : <NEW_LINE> <INDENT> return BOTTOM_RIGHT_VAL <NEW_LINE> <DEDENT> elif x < width and y < height : <NEW_LINE> <INDENT> return BOTTOM_LEFT_VAL <NEW_LINE> <DEDENT> elif x < width and y > height: <NEW_LINE> <INDENT> return TOP_LEFT_VAL <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> <DEDENT> for housePos in gameState.getPossibleHouses(): <NEW_LINE> <INDENT> for obsPos in gameState.getHouseWalls(housePos): <NEW_LINE> <INDENT> obsVar = OBS_VAR_TEMPLATE % obsPos <NEW_LINE> obsFactor = bn.Factor([obsVar], [FOOD_HOUSE_VAR, GHOST_HOUSE_VAR] , bayesNet.variableDomainsDict()) <NEW_LINE> for assignment in obsFactor.getAllPossibleAssignmentDicts(): <NEW_LINE> <INDENT> if assignment[FOOD_HOUSE_VAR] == getAssignment(obsPos[0] , obsPos[1]): <NEW_LINE> <INDENT> if assignment[obsVar] == RED_OBS_VAL: <NEW_LINE> <INDENT> prob = PROB_FOOD_RED <NEW_LINE> <DEDENT> elif assignment[obsVar] == BLUE_OBS_VAL: <NEW_LINE> <INDENT> prob = 1 - PROB_FOOD_RED <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> prob = 0 <NEW_LINE> <DEDENT> <DEDENT> elif assignment[GHOST_HOUSE_VAR] == getAssignment(obsPos[0] , obsPos[1]) : <NEW_LINE> <INDENT> if assignment[obsVar] == RED_OBS_VAL: <NEW_LINE> <INDENT> prob = PROB_GHOST_RED <NEW_LINE> <DEDENT> elif assignment[obsVar] == BLUE_OBS_VAL: <NEW_LINE> <INDENT> prob = 1 - PROB_GHOST_RED <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> prob = 0 <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if assignment[obsVar] == NO_OBS_VAL: <NEW_LINE> <INDENT> prob = 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> prob = 0 <NEW_LINE> <DEDENT> <DEDENT> obsFactor.setProbability(assignment, prob) <NEW_LINE> <DEDENT> bayesNet.setCPT(obsVar , obsFactor)
Question 2b: Bayes net probabilities Fill the CPT that gives the probability of an observation in each square, given the locations of the food and ghost houses. Refer to the project description for what this probability table looks like. You can use PROB_FOOD_RED and PROB_GHOST_RED from the top of the file. You will need to create a new factor for *each* of 4*7 = 28 observation variables. Don't forget to call bayesNet.setCPT for each factor you create. The XXXPos variables at the beginning of this method contain the (x, y) coordinates of each possible house location. IMPORTANT: Because of the particular choice of probabilities higher up in the Bayes net, it will never be the case that the ghost house and the food house are in the same place. However, the CPT for observations must still include a vaild probability distribution for this case. To conform with the autograder, use the *food house distribution* over assignment[obsVar]s when both the food house and ghost house are assigned to the same cell.
625941b51d351010ab85591b
def snmp_server_v3host_hostip(self, **kwargs): <NEW_LINE> <INDENT> config = ET.Element("config") <NEW_LINE> snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp") <NEW_LINE> if kwargs.pop('delete_snmp_server', False) is True: <NEW_LINE> <INDENT> delete_snmp_server = config.find('.//*snmp-server') <NEW_LINE> delete_snmp_server.set('operation', 'delete') <NEW_LINE> <DEDENT> v3host = ET.SubElement(snmp_server, "v3host") <NEW_LINE> if kwargs.pop('delete_v3host', False) is True: <NEW_LINE> <INDENT> delete_v3host = config.find('.//*v3host') <NEW_LINE> delete_v3host.set('operation', 'delete') <NEW_LINE> <DEDENT> username_key = ET.SubElement(v3host, "username") <NEW_LINE> username_key.text = kwargs.pop('username') <NEW_LINE> if kwargs.pop('delete_username', False) is True: <NEW_LINE> <INDENT> delete_username = config.find('.//*username') <NEW_LINE> delete_username.set('operation', 'delete') <NEW_LINE> <DEDENT> hostip = ET.SubElement(v3host, "hostip") <NEW_LINE> if kwargs.pop('delete_hostip', False) is True: <NEW_LINE> <INDENT> delete_hostip = config.find('.//*hostip') <NEW_LINE> delete_hostip.set('operation', 'delete') <NEW_LINE> <DEDENT> hostip.text = kwargs.pop('hostip') <NEW_LINE> callback = kwargs.pop('callback', self._callback) <NEW_LINE> return callback(config)
Auto Generated Code
625941b50c0af96317bb7fe0
def test_004_certificate__store_cert(self): <NEW_LINE> <INDENT> self.certificate.dbstore.certificate_add.side_effect = Exception('exc_cert_add') <NEW_LINE> with self.assertLogs('test_a2c', level='INFO') as lcm: <NEW_LINE> <INDENT> self.assertFalse(self.certificate._store_cert('cert_name', 'cert', 'raw')) <NEW_LINE> <DEDENT> self.assertIn('CRITICAL:test_a2c:acme2certifier database error in Certificate._store_cert(): exc_cert_add', lcm.output)
Test Certificate._store_cert() and check if we get something back
625941b5e1aae11d1e749aaa
def inside(head): <NEW_LINE> <INDENT> return -200 < head.x < 200 and -200 < head.y < 200
Return True if head is inside the boundaries.
625941b5d8ef3951e3243334
def ensure_service(self, service_name): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> s = self.services[service_name] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> s = BrokerService(self, service_name) <NEW_LINE> self.services[service_name] = s <NEW_LINE> logger.info('Created new service: "{}"'.format(service_name)) <NEW_LINE> <DEDENT> return s
Fetch (or create and fetch) a :class:`BrokerService` instance for the corresponding `service_name`
625941b596565a6dacc8f4cc
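The get-or-create pattern in ensure_service() can also be written with dict.setdefault; a minimal sketch, with a hypothetical BrokerService stub standing in for the real class:

class BrokerService:
    def __init__(self, broker, name):
        self.broker = broker
        self.name = name


class Broker:
    def __init__(self):
        self.services = {}

    def ensure_service(self, service_name):
        # Returns the existing entry, or inserts and returns a new one.
        return self.services.setdefault(
            service_name, BrokerService(self, service_name))

The try/except form in the record has one advantage over setdefault: it only constructs (and logs) a new BrokerService on a cache miss, whereas setdefault builds the candidate object on every call.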
def _checkPassword(self, editable = None, data = None): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> strength = checkPassword(self.pw.get_text()) <NEW_LINE> _pwq_error = None <NEW_LINE> <DEDENT> except PWQError as e: <NEW_LINE> <INDENT> _pwq_error = e.args[1] <NEW_LINE> strength = 0 <NEW_LINE> <DEDENT> if strength < 50: <NEW_LINE> <INDENT> val = 1 <NEW_LINE> text = _("Weak") <NEW_LINE> self._error = _("The password you have provided is weak") <NEW_LINE> if _pwq_error: <NEW_LINE> <INDENT> self._error += ": %s. " % _pwq_error <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._error += ". " <NEW_LINE> <DEDENT> self._error += _("You will have to press Done twice to confirm it.") <NEW_LINE> <DEDENT> elif strength < 75: <NEW_LINE> <INDENT> val = 2 <NEW_LINE> text = _("Fair") <NEW_LINE> self._error = False <NEW_LINE> <DEDENT> elif strength < 90: <NEW_LINE> <INDENT> val = 3 <NEW_LINE> text = _("Good") <NEW_LINE> self._error = False <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> val = 4 <NEW_LINE> text = _("Strong") <NEW_LINE> self._error = False <NEW_LINE> <DEDENT> if not self.pw.get_text(): <NEW_LINE> <INDENT> val = 0 <NEW_LINE> text = _("Empty") <NEW_LINE> self._error = _("The password is empty.") <NEW_LINE> <DEDENT> elif self.confirm.get_text() and self.pw.get_text() != self.confirm.get_text(): <NEW_LINE> <INDENT> self._error = _("The passwords do not match.") <NEW_LINE> <DEDENT> self.pw_bar.set_value(val) <NEW_LINE> self.pw_label.set_text(text) <NEW_LINE> self.clear_info() <NEW_LINE> if self._error: <NEW_LINE> <INDENT> self.set_warning(self._error) <NEW_LINE> self.window.show_all() <NEW_LINE> return False <NEW_LINE> <DEDENT> return True
This method updates the password indicators according to the passwords entered by the user. It is called by the changed Gtk event handler.
625941b5dc8b845886cb532b
def adj(self, v): <NEW_LINE> <INDENT> return self.adj[v]
vertices adjacent to v
625941b5498bea3a759b98a9
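As written, the record above only makes sense if an instance attribute named adj (e.g. a list of adjacency lists) shadows the method, which in turn makes the method itself unreachable on instances. The usual convention keeps the backing store under a private name; a minimal sketch:

class Graph:
    def __init__(self, num_vertices):
        self._adj = [[] for _ in range(num_vertices)]

    def add_edge(self, v, w):
        # Undirected edge: record each endpoint in the other's list.
        self._adj[v].append(w)
        self._adj[w].append(v)

    def adj(self, v):
        """Vertices adjacent to v."""
        return self._adj[v]


g = Graph(3)
g.add_edge(0, 1)
print(g.adj(0))  # [1]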
def read(self, argv): <NEW_LINE> <INDENT> self.clear() <NEW_LINE> pas = False <NEW_LINE> last = len(argv) - 1 <NEW_LINE> for w, i_w in zipi(argv): <NEW_LINE> <INDENT> if w[0] == "-" and i_w != last and argv[i_w + 1] == "=": <NEW_LINE> <INDENT> ERROR("flags cannot be assigned with a equal sign: {} =".format(w)) <NEW_LINE> <DEDENT> if w == "=" and i_w == last: <NEW_LINE> <INDENT> ERROR("no assignment after equal sign") <NEW_LINE> <DEDENT> if w[0] == "-" and len(w) == 1: <NEW_LINE> <INDENT> ERROR("single '-', syntax not correct") <NEW_LINE> <DEDENT> <DEDENT> self.arg = [] <NEW_LINE> self.flag = [] <NEW_LINE> i = 0 <NEW_LINE> while i < last: <NEW_LINE> <INDENT> i += 1 <NEW_LINE> w = argv[i] <NEW_LINE> w1 = "-" if i >= last else argv[i+1] <NEW_LINE> w2 = " " if i >= last - 1 else argv[i+2] <NEW_LINE> if w1 == "=": <NEW_LINE> <INDENT> key, val = w, argv[i + 2] <NEW_LINE> if self.has_param(key): <NEW_LINE> <INDENT> self.warn("parameter {0} already set to {1} -> overwrite to {2}".format(key, self[key], val)) <NEW_LINE> <DEDENT> self[key] = to_number(val) <NEW_LINE> i += 2 <NEW_LINE> continue <NEW_LINE> <DEDENT> if w[0] == "-": <NEW_LINE> <INDENT> w = w[1:] <NEW_LINE> if w1[0] != "-" and w2 != "=": <NEW_LINE> <INDENT> if w in self.param: <NEW_LINE> <INDENT> self.warn("parameter {0} already set to {1} -> overwrite to {2}".format(w, self[w], w1)) <NEW_LINE> <DEDENT> self[w] = to_number(w1) <NEW_LINE> i += 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if w in self.flag: <NEW_LINE> <INDENT> self.warn("flag {0} was already set".format(w)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.flag.append(w) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if pas: <NEW_LINE> <INDENT> pas = False <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if w in self.arg: <NEW_LINE> <INDENT> self.warn("arg {0} was already set".format(w)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.arg.append(w)
The passed argv is parsed and stored in the right places. Legal notation is: - -flag - -param value - param = value - arg
625941b5460517430c393f87
@template("repo_browser/commit_list.html") <NEW_LINE> def commitlist(request, repository_slug): <NEW_LINE> <INDENT> repository = get_object_or_404( repo_browser.models.Repository, slug=str(repository_slug)) <NEW_LINE> commit_paginator = Paginator( repository.commits.all(), int(request.GET.get("per_page", 50))) <NEW_LINE> commits = commit_paginator.page(int(request.GET.get("page", 1))) <NEW_LINE> return {"repository": repository, "commits": commits}
View a set of commits in a repo
625941b5d58c6744b4257a58
def openfile(self): <NEW_LINE> <INDENT> self.title=self.txt.get() <NEW_LINE> self.title=str(self.title) <NEW_LINE> filepath=askopenfilename(title="Choose file ",filetypes=[('txt files','.txt'),('iperf files','.iperf'),('all files','.*')]) <NEW_LINE> iperf_log(filepath,"r",self.title)
Return the path of the file
625941b55166f23b2e1a4f50
def handle_extra_path(self): <NEW_LINE> <INDENT> if self.extra_path is None: <NEW_LINE> <INDENT> self.extra_path = self.distribution.extra_path <NEW_LINE> <DEDENT> if self.extra_path is not None: <NEW_LINE> <INDENT> if isinstance(self.extra_path, str): <NEW_LINE> <INDENT> self.extra_path = self.extra_path.split(',') <NEW_LINE> <DEDENT> if len(self.extra_path) == 1: <NEW_LINE> <INDENT> path_file = extra_dirs = self.extra_path[0] <NEW_LINE> <DEDENT> elif len(self.extra_path) == 2: <NEW_LINE> <INDENT> path_file, extra_dirs = self.extra_path <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise DistutilsOptionError( "'extra_path' option must be a list, tuple, or " "comma-separated string with 1 or 2 elements") <NEW_LINE> <DEDENT> extra_dirs = convert_path(extra_dirs) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> path_file = None <NEW_LINE> extra_dirs = '' <NEW_LINE> <DEDENT> self.path_file = path_file <NEW_LINE> self.extra_dirs = extra_dirs
Set `path_file` and `extra_dirs` using `extra_path`.
625941b5507cdc57c6306ac9
def remove_html_tag(source): <NEW_LINE> <INDENT> if source: <NEW_LINE> <INDENT> result = re.sub('<[^>]+>', '', source) <NEW_LINE> return result <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return str()
Remove HTML tags from a string :param source: :return:
625941b53346ee7daa2b2b60
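A quick usage check of the record above; note that a pattern like '<[^>]+>' handles simple markup only — a real HTML pipeline would use a parser (e.g. html.parser), since the regex breaks on comments and attribute values containing '>':

import re


def remove_html_tag(source):
    # Strip anything that looks like an HTML tag; empty string for falsy input.
    return re.sub('<[^>]+>', '', source) if source else str()


print(remove_html_tag('<p>Hello <b>world</b></p>'))  # Hello world
print(remove_html_tag(None))                         # (empty string)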
def RiFrameAspectRatio(frameratio): <NEW_LINE> <INDENT> _ribout.write('FrameAspectRatio %s\n'%frameratio)
Set the ratio between width and height of the image. Example: RiFrameAspectRatio(4.0/3)
625941b5b5575c28eb68ddf4
def step(self, closure=None): <NEW_LINE> <INDENT> loss = None <NEW_LINE> if closure is not None: <NEW_LINE> <INDENT> loss = closure() <NEW_LINE> <DEDENT> for group in self.param_groups: <NEW_LINE> <INDENT> for p in group['params']: <NEW_LINE> <INDENT> if p.grad is None: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> grad = p.grad.data <NEW_LINE> if grad.is_sparse: <NEW_LINE> <INDENT> raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') <NEW_LINE> <DEDENT> amsgrad = group['amsgrad'] <NEW_LINE> state = self.state[p] <NEW_LINE> if len(state) == 0: <NEW_LINE> <INDENT> state['step'] = 0 <NEW_LINE> state['exp_avg'] = torch.zeros_like(p.data) <NEW_LINE> state['exp_avg_sq'] = torch.zeros_like(p.data) <NEW_LINE> if len(p.size())!=1: <NEW_LINE> <INDENT> state['followed_weight'] = np.random.randint(p.size(0)),np.random.randint(p.size(1)) <NEW_LINE> <DEDENT> if amsgrad: <NEW_LINE> <INDENT> state['max_exp_avg_sq'] = torch.zeros_like(p.data) <NEW_LINE> <DEDENT> <DEDENT> exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] <NEW_LINE> if amsgrad: <NEW_LINE> <INDENT> max_exp_avg_sq = state['max_exp_avg_sq'] <NEW_LINE> <DEDENT> beta1, beta2 = group['betas'] <NEW_LINE> state['step'] += 1 <NEW_LINE> if group['weight_decay'] != 0: <NEW_LINE> <INDENT> grad.add_(group['weight_decay'], p.data) <NEW_LINE> <DEDENT> exp_avg.mul_(beta1).add_(1 - beta1, grad) <NEW_LINE> exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) <NEW_LINE> if amsgrad: <NEW_LINE> <INDENT> torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) <NEW_LINE> denom = max_exp_avg_sq.sqrt().add_(group['eps']) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> denom = exp_avg_sq.sqrt().add_(group['eps']) <NEW_LINE> <DEDENT> bias_correction1 = 1 - beta1 ** state['step'] <NEW_LINE> bias_correction2 = 1 - beta2 ** state['step'] <NEW_LINE> step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1 <NEW_LINE> binary_weight_before_update = torch.sign(p.data) <NEW_LINE> condition_consolidation = (torch.mul(binary_weight_before_update,exp_avg) > 0.0 ) <NEW_LINE> decayed_exp_avg = torch.mul(torch.ones_like(p.data)-torch.pow(torch.tanh(group['meta']*torch.abs(p.data)),2) ,exp_avg) <NEW_LINE> if len(p.size())==1: <NEW_LINE> <INDENT> p.data.addcdiv_(-step_size, exp_avg, denom) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> p.data.addcdiv_(-step_size, torch.where(condition_consolidation, decayed_exp_avg, exp_avg) , denom) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return loss
Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss.
625941b5d18da76e235322c8
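The only departure from vanilla Adam in the record above is the consolidation branch: for multi-dimensional parameters whose sign agrees with their momentum, the update is damped by the factor 1 - tanh^2(meta * |w|), so weights that are already large move less. A tiny numeric sketch of just that factor (the meta value and weights below are illustrative):

import torch

meta = 1.3  # illustrative metaplasticity strength
w = torch.tensor([0.05, 0.5, 2.0])
damping = 1.0 - torch.tanh(meta * w.abs()) ** 2
print(damping)  # ~[0.996, 0.673, 0.022]: large |w| is strongly consolidated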
def numbers_constant_forward(r, maxnum, n): <NEW_LINE> <INDENT> for k in range(r): <NEW_LINE> <INDENT> for num in range(1, maxnum + 1): <NEW_LINE> <INDENT> for j in range(n): <NEW_LINE> <INDENT> print(num, end='') <NEW_LINE> <DEDENT> <DEDENT> print() <NEW_LINE> <DEDENT> print()
Prints a rectangle of numbers, with r rows. Each row has n 1s, then n 2s, then n 3s, etc. up to n maxnum's. For example, when r = 4, maxnum = 7 and n = 3: 111222333444555666777 111222333444555666777 111222333444555666777 111222333444555666777 Notice that there were r = 4 rows; each row had numbers that went from 1 to maxnum = 7; and there were n occurrences of each number on each row. Here is another example, when r = 3, maxnum = 5 and n = 8: 1111111122222222333333334444444455555555 1111111122222222333333334444444455555555 1111111122222222333333334444444455555555 Preconditions: r, maxnum and n are positive integers.
625941b566673b3332b91e8e
def test_getfeature_geojson_with_selection(client): <NEW_LINE> <INDENT> query_string = ( "?" "SERVICE=WFS&" "VERSION=1.1.0&" "REQUEST=GetFeature&" "TYPENAME=lines&" "OUTPUTFORMAT=GeoJSON&" "FEATUREID=lines.1,lines.2&" "MAP={}" ).format(PROJECT) <NEW_LINE> rv = client.get(query_string, PROJECT) <NEW_LINE> assert rv.status_code == 200 <NEW_LINE> assert 'application/vnd.geo+json' in rv.headers.get('Content-Type'), rv.headers <NEW_LINE> layer = _test_vector_layer(rv.file('geojson'), 'GeoJSON', count=2) <NEW_LINE> _test_list(layer.fields().names(), ['id', 'name', 'comment'])
Test GetFeature as GeoJSON with a selection.
625941b54a966d76dd550e03
def recursive_data_display(self, data, cnt = 1, out = True): <NEW_LINE> <INDENT> if type(data) is not dict or len(data) == 0 or type(cnt) is not int or cnt < 0 or type(out) is not bool: <NEW_LINE> <INDENT> if out is True: <NEW_LINE> <INDENT> print(self._invalid_arguments.format( self._red, datetime.now().strftime(self._format_time), self._end, __class__.__name__ + '.' + self.recursive_data_display.__name__ )) <NEW_LINE> <DEDENT> return None <NEW_LINE> <DEDENT> if out is False: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> for key, val in data.items(): <NEW_LINE> <INDENT> if type(val) is not dict: <NEW_LINE> <INDENT> val = str(val) if type(val) is not list else ', '.join(str(v) for v in val) <NEW_LINE> print(('\t' * cnt) + '"' + key + '" - ' + val) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print(('\t' * cnt) + '"' + key + '":') <NEW_LINE> self.recursive_data_display(val, cnt + 1) <NEW_LINE> <DEDENT> <DEDENT> return None
Recursively display data from a dictionary (dict [, int, bool]) -> None Arguments: data - Dictionary cnt - Recursion counter out - Print execution progress Returns None
625941b58a349b6b435e7f6c
def _base_url(): <NEW_LINE> <INDENT> return __opts__.get("digicert", {}).get( "base_url", "https://www.digicert.com/services/v2/" )
Return the base_url
625941b5e8904600ed9f1d21
def test_right_login_form(self): <NEW_LINE> <INDENT> form_data = {'username': 'TimeaKovacs', 'password': 'Test_password'} <NEW_LINE> form = UserLoginForm(data=form_data) <NEW_LINE> self.assertTrue(form.is_valid())
Test the UserLoginForm
625941b591f36d47f21ac2ed
def test_get_all_boatrooms(self): <NEW_LINE> <INDENT> pass
Test case for get_all_boatrooms Retrieve all boatrooms
625941b50fa83653e4656db6
def test_index_view_with_two_past_questions(self): <NEW_LINE> <INDENT> create_question(question_text="Past question 1.", days=-30) <NEW_LINE> create_question(question_text="Past question 2.", days=-5) <NEW_LINE> print(reverse('polls:index')) <NEW_LINE> response = self.client.get(reverse('polls:index')) <NEW_LINE> self.assertQuerysetEqual( response.context['latest_question_list'], ['<Question: Past question 2.>', '<Question: Past question 1.>'] )
The questions index page may display multiple questions. :return:
625941b576d4e153a657e927
def test_enabled_doesnt_query_site(self): <NEW_LINE> <INDENT> re_django_site_query = re.compile(r'FROM\s+"django_site"') <NEW_LINE> self.enable_saml() <NEW_LINE> provider_count = 5 <NEW_LINE> for i in range(provider_count): <NEW_LINE> <INDENT> self.configure_saml_provider(enabled=True, slug="saml-slug-%s" % i) <NEW_LINE> <DEDENT> with CaptureQueriesContext(connections[DEFAULT_DB_ALIAS]) as cq: <NEW_LINE> <INDENT> enabled_slugs = {p.slug for p in provider.Registry.enabled()} <NEW_LINE> <DEDENT> self.assertEqual(len(enabled_slugs), provider_count) <NEW_LINE> all_queries = [q['sql'] for q in cq.captured_queries] <NEW_LINE> django_site_queries = list(filter(re_django_site_query.search, all_queries)) <NEW_LINE> self.assertEqual(len(django_site_queries), 0)
Regression test for 1+N queries for django_site (ARCHBOM-1139)
625941b5c432627299f04a3c
def ack3Callback(self): <NEW_LINE> <INDENT> if self.checkButtons.getButtonPressed("ACK3"): <NEW_LINE> <INDENT> self.notifyModelTask(["OBQENABLEACK3"]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.notifyModelTask(["OBQDISABLEACK3"])
Called when the ACK3 checkbutton is pressed
625941b5656771135c3eb66a
def create_allow_rule(name: str, addresses: List[str], *, dry_run: bool): <NEW_LINE> <INDENT> args = [ 'gcloud', 'compute', 'firewall-rules', 'create', name, '--project={}'.format(GCP_PROJECT), '--action=ALLOW', '--rules=tcp:80,tcp:443', '--direction=INGRESS', '--priority={}'.format(PRIORITY_ALLOW), '--no-enable-logging', '--source-ranges={}'.format(','.join(addresses)), ] <NEW_LINE> if dry_run: <NEW_LINE> <INDENT> print('Running:', ' '.join(args)) <NEW_LINE> return <NEW_LINE> <DEDENT> return subprocess.run(args, check=True)
Create a single rule that allows the specified ranges
625941b54e4d5625662d41d5
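A hedged usage sketch of the record above; GCP_PROJECT and PRIORITY_ALLOW are module-level constants the function assumes, and the rule name and documentation-range addresses below are illustrative:

create_allow_rule(
    'allow-office-ingress',
    ['203.0.113.0/24', '198.51.100.7/32'],
    dry_run=True,
)
# With dry_run=True nothing is executed; the assembled gcloud
# command line is printed instead.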
def test_missing_scratch_stat(self): <NEW_LINE> <INDENT> out = StringIO() <NEW_LINE> call_command( 'import_weekly_storage', '--homefile=/app/stats/tests/project_usage_home.csv', '--scratchfile=/app/stats/tests/project_usage_scratch.csv', '-d 21', '-m 11', '-y 2020', '-s CF', stdout=out ) <NEW_LINE> self.assertIn( "Couldn't find scratch stats for scw0000...skipping", out.getvalue(), )
Ensure an error is displayed to the user if scratch stats for a project are not found.
625941b530c21e258bdfa294
def __str__(self): <NEW_LINE> <INDENT> __str_object = "" <NEW_LINE> for arg in dir(self): <NEW_LINE> <INDENT> if not arg.startswith("__") and not isinstance(getattr(self, arg), MethodType): <NEW_LINE> <INDENT> __str_object += "{ %-15s } -> " % arg + str(getattr(self, arg)) + '\n' <NEW_LINE> <DEDENT> <DEDENT> return __str_object
:return: a description of one object.
625941b556b00c62f0f14454
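A self-contained variant of the reflective __str__ record above, with an illustrative attribute set; dir() plus a MethodType check yields every non-dunder data attribute:

from types import MethodType


class Config:
    def __init__(self):
        self.host = 'localhost'
        self.port = 8080

    def __str__(self):
        out = ''
        for name in dir(self):
            attr = getattr(self, name)
            if not name.startswith('__') and not isinstance(attr, MethodType):
                out += '{ %-15s } -> %s\n' % (name, attr)
        return out


print(Config())
# { host            } -> localhost
# { port            } -> 8080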
def update( self, resource_group_name, search_service_name, service, search_management_request_options=None, **kwargs ): <NEW_LINE> <INDENT> cls = kwargs.pop('cls', None) <NEW_LINE> error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } <NEW_LINE> error_map.update(kwargs.pop('error_map', {})) <NEW_LINE> _client_request_id = None <NEW_LINE> if search_management_request_options is not None: <NEW_LINE> <INDENT> _client_request_id = search_management_request_options.client_request_id <NEW_LINE> <DEDENT> api_version = "2020-08-01" <NEW_LINE> content_type = kwargs.pop("content_type", "application/json") <NEW_LINE> accept = "application/json" <NEW_LINE> url = self.update.metadata['url'] <NEW_LINE> path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'searchServiceName': self._serialize.url("search_service_name", search_service_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } <NEW_LINE> url = self._client.format_url(url, **path_format_arguments) <NEW_LINE> query_parameters = {} <NEW_LINE> query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') <NEW_LINE> header_parameters = {} <NEW_LINE> if _client_request_id is not None: <NEW_LINE> <INDENT> header_parameters['x-ms-client-request-id'] = self._serialize.header("client_request_id", _client_request_id, 'str') <NEW_LINE> <DEDENT> header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') <NEW_LINE> header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') <NEW_LINE> body_content_kwargs = {} <NEW_LINE> body_content = self._serialize.body(service, 'SearchServiceUpdate') <NEW_LINE> body_content_kwargs['content'] = body_content <NEW_LINE> request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) <NEW_LINE> pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) <NEW_LINE> response = pipeline_response.http_response <NEW_LINE> if response.status_code not in [200]: <NEW_LINE> <INDENT> map_error(status_code=response.status_code, response=response, error_map=error_map) <NEW_LINE> raise HttpResponseError(response=response, error_format=ARMErrorFormat) <NEW_LINE> <DEDENT> deserialized = self._deserialize('SearchService', pipeline_response) <NEW_LINE> if cls: <NEW_LINE> <INDENT> return cls(pipeline_response, deserialized, {}) <NEW_LINE> <DEDENT> return deserialized
Updates an existing search service in the given resource group. :param resource_group_name: The name of the resource group within the current subscription. You can obtain this value from the Azure Resource Manager API or the portal. :type resource_group_name: str :param search_service_name: The name of the Azure Cognitive Search service to update. :type search_service_name: str :param service: The definition of the search service to update. :type service: ~azure.mgmt.search.models.SearchServiceUpdate :param search_management_request_options: Parameter group. :type search_management_request_options: ~azure.mgmt.search.models.SearchManagementRequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchService, or the result of cls(response) :rtype: ~azure.mgmt.search.models.SearchService :raises: ~azure.core.exceptions.HttpResponseError
625941b5d10714528d5ffad7
def train(hparams, summary_dir, num_gpus, model_type, max_steps, save_step, data_dir, num_targets, dataset, validate): <NEW_LINE> <INDENT> gpu_id = 3 <NEW_LINE> with tf.device('/gpu:%d' % gpu_id): <NEW_LINE> <INDENT> with tf.Graph().as_default(): <NEW_LINE> <INDENT> features = dict() <NEW_LINE> sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) <NEW_LINE> init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()) <NEW_LINE> sess.run(init_op) <NEW_LINE> model = models[model_type](hparams) <NEW_LINE> coord = tf.train.Coordinator() <NEW_LINE> threads = tf.train.start_queue_runners(sess=sess, coord=coord) <NEW_LINE> for i in range(epoch): <NEW_LINE> <INDENT> print('--------------------epoch:{}------------------'.format(i + 1)) <NEW_LINE> data = next_batch(batch_size, 'train') <NEW_LINE> total_correct = 0 <NEW_LINE> total_loss = 0 <NEW_LINE> count = 0 <NEW_LINE> for batched_data in data: <NEW_LINE> <INDENT> X, Y, ngram_num = batched_data <NEW_LINE> count += 1 <NEW_LINE> features['text'], features['labels'] = X, Y <NEW_LINE> features['num_classes'], features['max_ngram_len'] = len(user2idx), max_len <NEW_LINE> features['ngram_num'] = ngram_num <NEW_LINE> out = model._single_tower(gpu_id, features) <NEW_LINE> loss, correct = sess.run([out.losses, out.correct]) <NEW_LINE> total_loss += loss <NEW_LINE> total_correct += correct <NEW_LINE> <DEDENT> print('train_loss: {}, train_acc: {}'.format(total_loss / count, total_correct / (count * batch_size))) <NEW_LINE> print('-------------------valid:{}--------------------'.format(i + 1)) <NEW_LINE> data = next_batch(batch_size, 'valid') <NEW_LINE> total_correct = 0 <NEW_LINE> total_loss = 0 <NEW_LINE> count = 0 <NEW_LINE> for batched_data in data: <NEW_LINE> <INDENT> X, Y, ngram_num = batched_data <NEW_LINE> count += 1 <NEW_LINE> features['text'], features['labels'] = X, Y <NEW_LINE> features['num_classes'], features['max_ngram_len'] = len(user2idx), max_len <NEW_LINE> features['ngram_num'] = ngram_num <NEW_LINE> out = model._single_tower(gpu_id, features) <NEW_LINE> loss, correct = sess.run([out.losses, out.correct]) <NEW_LINE> total_loss += loss <NEW_LINE> total_correct += correct <NEW_LINE> <DEDENT> print('valid_loss: {}, valid_acc: {}'.format(total_loss / count, total_correct / (count * batch_size))) <NEW_LINE> <DEDENT> coord.join(threads) <NEW_LINE> sess.close()
Trains a model with batch sizes of 128 to FLAGS.max_steps steps. It will initialize the model with either previously saved model in the summary directory or start from scratch if FLAGS.restart is set or the directory is empty. The training is distributed on num_gpus GPUs. It writes a summary at every step and saves the model every 1500 iterations. Args: hparams: The hyper parameters to build the model graph. summary_dir: The directory to save model and write training summaries. num_gpus: Number of GPUs to use for reading data and computation. model_type: The model architecture category. max_steps: Maximum number of training iterations. save_step: How often the training model should be saved. data_dir: Directory containing the input data. num_targets: Number of objects present in the image. dataset: Name of the dataset for the experiments. validate: If set, use training-validation set for training.
625941b55fc7496912cc377d
def critical_density(wavelength): <NEW_LINE> <INDENT> n_c = electron_rest_mass * epsilon_zero * ((2 * np.pi * lightspeed) / (electric_charge * wavelength)) ** 2 <NEW_LINE> return n_c
Calculates the critical plasma density: .. math:: n_c = m_e \varepsilon_0 \left( \frac{2 \pi c}{e \lambda} \right)^2 Parameters ---------- wavelength : in meters Returns ------- float the critical plasma density
625941b5187af65679ca4f1c
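A worked check of the record above, assuming scipy's CODATA constants supply the module-level names for m_e, epsilon_0, c and e; for an 800 nm Ti:sapphire wavelength the result lands near the textbook value of ~1.7e27 m^-3:

import numpy as np
from scipy.constants import c as lightspeed, e as electric_charge
from scipy.constants import electron_mass as electron_rest_mass
from scipy.constants import epsilon_0 as epsilon_zero

n_c = electron_rest_mass * epsilon_zero * (
    (2 * np.pi * lightspeed) / (electric_charge * 800e-9)) ** 2
print(n_c)  # ~1.74e27 electrons per cubic metre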
def __init__(self, tunables, gridding=0, r_minimum=2): <NEW_LINE> <INDENT> super(GCP, self).__init__(tunables, gridding=gridding) <NEW_LINE> self.r_minimum = r_minimum
Extra args: r_minimum: the minimum number of past results this selector needs in order to use a Gaussian process for prediction. If not enough results are present during a fit(), subsequent calls to propose() will revert to uniform selection.
625941b5a05bb46b383ec626